Mirror of https://github.com/yggdrasil-network/yggdrasil-go.git (synced 2025-04-30 07:05:06 +03:00)

Merge branch 'future' into neilalexander/mdns

Commit 2ec3c4a0d5
84 changed files with 2557 additions and 11161 deletions
.circleci/config.yml

@@ -3,9 +3,22 @@
# Check https://circleci.com/docs/2.0/language-go/ for more details
version: 2.1
jobs:
  lint:
    docker:
      - image: circleci/golang:1.16

    steps:
      - checkout

      - run:
          name: Run golangci-lint
          command: |
            go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.31.0
            golangci-lint run

  build-linux:
    docker:
      - image: circleci/golang:1.14.1
      - image: circleci/golang:1.16

    steps:
      - checkout
@@ -106,11 +119,11 @@ jobs:
            echo -e "Host *\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config

      - run:
          name: Install Go 1.14.1
          name: Install Go 1.16
          command: |
            cd /tmp
            curl -LO https://dl.google.com/go/go1.14.1.darwin-amd64.pkg
            sudo installer -pkg /tmp/go1.14.1.darwin-amd64.pkg -target /
            curl -LO https://dl.google.com/go/go1.16.darwin-amd64.pkg
            sudo installer -pkg /tmp/go1.16.darwin-amd64.pkg -target /

      #- run:
      #    name: Install Gomobile
@@ -144,9 +157,46 @@ jobs:
          paths:
            - upload

  build-windows:
    docker:
      - image: circleci/golang:1.16

    steps:
      - checkout

      - run:
          name: Create artifact upload directory and set variables
          command: |
            mkdir /tmp/upload
            echo 'export CINAME=$(sh contrib/semver/name.sh)' >> $BASH_ENV
            echo 'export CIVERSION=$(sh contrib/semver/version.sh --bare)' >> $BASH_ENV
            git config --global user.email "$(git log --format='%ae' HEAD -1)";
            git config --global user.name "$(git log --format='%an' HEAD -1)";

      - run:
          name: Install tools
          command: |
            sudo apt-get update
            sudo apt-get -y install msitools wixl

      - run:
          name: Build for Windows
          command: |
            rm -f {yggdrasil,yggdrasilctl}
            GOOS=windows GOARCH=amd64 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-amd64.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-amd64.exe;
            GOOS=windows GOARCH=386 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-i386.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-i386.exe;
            bash contrib/msi/build-msi.sh x64
            bash contrib/msi/build-msi.sh x86
            mv *.msi /tmp/upload

      - persist_to_workspace:
          root: /tmp
          paths:
            - upload

  build-other:
    docker:
      - image: circleci/golang:1.14.1
      - image: circleci/golang:1.16

    steps:
      - checkout
@@ -174,13 +224,6 @@ jobs:
            GOOS=freebsd GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-amd64 && mv yggdrasilctl /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-freebsd-amd64;
            GOOS=freebsd GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-i386 && mv yggdrasilctl /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-freebsd-i386;

      - run:
          name: Build for Windows
          command: |
            rm -f {yggdrasil,yggdrasilctl}
            GOOS=windows GOARCH=amd64 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-amd64.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-amd64.exe;
            GOOS=windows GOARCH=386 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-i386.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-i386.exe;

      - persist_to_workspace:
          root: /tmp
          paths:
@@ -201,11 +244,14 @@ workflows:
  version: 2.1
  build:
    jobs:
      - lint
      - build-linux
      - build-macos
      - build-windows
      - build-other
      - upload:
          requires:
            - build-linux
            - build-macos
            - build-windows
            - build-other
10 .golangci.yml (new file)

@@ -0,0 +1,10 @@
run:
  build-tags:
    - lint
  issues-exit-code: 0 # TODO: change this to 1 when we want it to fail builds
  skip-dirs:
    - contrib/
    - misc/
linters:
  disable:
    - gocyclo
46 CHANGELOG.md

@@ -25,6 +25,52 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
- in case of vulnerabilities.
-->

## [0.3.16] - 2021-03-18
### Added
- New simulation code under `cmd/yggdrasilsim` (work-in-progress)

### Changed
- Multi-threading in the switch
  - Switch lookups happen independently for each (incoming) peer connection, instead of being funneled to a single dedicated switch worker
  - Packets are queued for each (outgoing) peer connection, instead of being handled by a single dedicated switch worker
- Queue logic rewritten (a simplified sketch follows this section)
  - Heap structure per peer that traffic is routed to, with one FIFO queue per traffic flow
  - The total size of each heap is configured automatically (we basically queue packets until we think we're blocked on a socket write)
  - When adding to a full heap, the oldest packet from the largest queue is dropped
  - Packets are popped from the queue in FIFO order (oldest packet from among all queues in the heap) to prevent packet reordering at the session level
- Removed global `sync.Pool` of `[]byte`
  - Local `sync.Pool`s are used in the hot loops, but not exported, to avoid memory corruption if libraries are reused by other projects
  - This may increase allocations (and slightly reduce speed in CPU-bound benchmarks) when interacting with the tun/tap device, but traffic forwarded at the switch layer should be unaffected
- Upgrade dependencies
- Upgrade build to Go 1.16

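Editor's note: the Go sketch below is not part of the changelog or the codebase; it only illustrates the queue behaviour described under "Queue logic rewritten" above, namely one FIFO per traffic flow, a shared size budget, dropping the oldest packet from the largest queue on overflow, and popping in overall FIFO order. The names `packet` and `pktQueues` and the fixed `limit` are invented for the example; the real switch tunes the budget automatically and tracks queued bytes rather than packet counts.

    package main

    import "fmt"

    type packet struct {
    	flow string // traffic flow identifier
    	seq  int    // stand-in for arrival order
    	size int    // size in bytes
    }

    // pktQueues keeps one FIFO per flow under a shared byte budget.
    type pktQueues struct {
    	flows  map[string][]packet
    	queued int
    	limit  int // fixed here; auto-tuned in the real switch
    }

    func (q *pktQueues) push(p packet) {
    	q.flows[p.flow] = append(q.flows[p.flow], p)
    	q.queued += p.size
    	for q.queued > q.limit {
    		// Drop the oldest packet from the largest queue
    		// (largest by packet count in this sketch).
    		victim, found := "", false
    		for f := range q.flows {
    			if !found || len(q.flows[f]) > len(q.flows[victim]) {
    				victim, found = f, true
    			}
    		}
    		q.queued -= q.flows[victim][0].size
    		q.flows[victim] = q.flows[victim][1:]
    		if len(q.flows[victim]) == 0 {
    			delete(q.flows, victim)
    		}
    	}
    }

    // pop returns the oldest packet across all flows, preserving per-session order.
    func (q *pktQueues) pop() (packet, bool) {
    	oldest, found := "", false
    	for f := range q.flows {
    		if !found || q.flows[f][0].seq < q.flows[oldest][0].seq {
    			oldest, found = f, true
    		}
    	}
    	if !found {
    		return packet{}, false
    	}
    	p := q.flows[oldest][0]
    	q.flows[oldest] = q.flows[oldest][1:]
    	if len(q.flows[oldest]) == 0 {
    		delete(q.flows, oldest)
    	}
    	q.queued -= p.size
    	return p, true
    }

    func main() {
    	q := &pktQueues{flows: map[string][]packet{}, limit: 4000}
    	for i := 0; i < 5; i++ {
    		q.push(packet{flow: "a", seq: 2 * i, size: 1000})
    		q.push(packet{flow: "b", seq: 2*i + 1, size: 500})
    	}
    	for p, ok := q.pop(); ok; p, ok = q.pop() {
    		fmt.Println(p.flow, p.seq, p.size)
    	}
    }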
### Fixed
- Fixed a bug where the connection listener could exit prematurely due to resource exhaustion (if e.g. too many connections were opened)
- Fixed DefaultIfName for OpenBSD (`/dev/tun0` -> `tun0`)
- Fixed an issue where a peer could sometimes never be added to the switch
- Fixed a goroutine leak that could occur if a peer with an open connection continued to spam additional connection attempts

## [0.3.15] - 2020-09-27
### Added
- Support for pinning remote public keys in peering strings has been added, e.g.
  - By signing public key: `tcp://host:port?ed25519=key`
  - By encryption public key: `tcp://host:port?curve25519=key`
  - By both: `tcp://host:port?ed25519=key&curve25519=key`
  - By multiple, in case of DNS round-robin or similar: `tcp://host:port?curve25519=key&curve25519=key&ed25519=key&ed25519=key`
- Some checks to prevent Yggdrasil-over-Yggdrasil peerings have been added
- Added support for SOCKS proxy authentication, e.g. `socks://user@password:host/...`

### Fixed
- Some bugs in the multicast code that could cause unnecessary CPU usage have been fixed
- A possible multicast deadlock on macOS when enumerating interfaces has been fixed
- A deadlock in the connection code has been fixed
- Updated HJSON dependency that caused some build problems

### Changed
- `DisconnectPeer` and `RemovePeer` have been separated and implemented properly now
- Fewer nodes are stored in the DHT now, reducing ambient network traffic and possible instability
- Default config file for FreeBSD is now at `/usr/local/etc/yggdrasil.conf` instead of `/etc/yggdrasil.conf`

## [0.3.14] - 2020-03-28
### Fixed
- Fixes a memory leak that may occur if packets are incorrectly never removed from a switch queue
README.md

@@ -26,7 +26,7 @@ some of the below:
- Linux
  - `.deb` and `.rpm` packages are built by CI for Debian and Red Hat-based
    distributions
  - Void and Arch packages also available within their respective repositories
  - Arch, Nix, Void packages also available within their respective repositories
- macOS
  - `.pkg` packages are built by CI
- Ubiquiti EdgeOS
@@ -48,7 +48,7 @@ You may also find other platform-specific wrappers, scripts or tools in the
If you want to build from source, as opposed to installing one of the pre-built
packages:

1. Install [Go](https://golang.org) (requires Go 1.13 or later)
1. Install [Go](https://golang.org) (requires Go 1.16 or later)
2. Clone this repository
2. Run `./build`
20 appveyor.yml (file deleted)

@@ -1,20 +0,0 @@
version: '{build}'
pull_requests:
  do_not_increment_build_number: true
os: Visual Studio 2017
shallow_clone: false

environment:
  MSYS2_PATH_TYPE: inherit
  CHERE_INVOKING: enabled_from_arguments

build_script:
  - cmd: >-
      cd %APPVEYOR_BUILD_FOLDER%
  - c:\msys64\usr\bin\bash -lc "./contrib/msi/build-msi.sh x64"
  - c:\msys64\usr\bin\bash -lc "./contrib/msi/build-msi.sh x86"

test: off

artifacts:
  - path: '*.msi'
|
@ -13,119 +13,66 @@ This only matters if it's high enough to make you the root of the tree.
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
)
|
||||
|
||||
var doSig = flag.Bool("sig", false, "generate new signing keys instead")
|
||||
|
||||
type keySet struct {
|
||||
priv []byte
|
||||
pub []byte
|
||||
id []byte
|
||||
ip string
|
||||
priv ed25519.PrivateKey
|
||||
pub ed25519.PublicKey
|
||||
}
|
||||
|
||||
func main() {
|
||||
threads := runtime.GOMAXPROCS(0)
|
||||
var threadChannels []chan []byte
|
||||
var currentBest []byte
|
||||
var currentBest ed25519.PublicKey
|
||||
newKeys := make(chan keySet, threads)
|
||||
flag.Parse()
|
||||
|
||||
for i := 0; i < threads; i++ {
|
||||
threadChannels = append(threadChannels, make(chan []byte, threads))
|
||||
switch {
|
||||
case *doSig:
|
||||
go doSigKeys(newKeys, threadChannels[i])
|
||||
default:
|
||||
go doBoxKeys(newKeys, threadChannels[i])
|
||||
go doKeys(newKeys)
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
newKey := <-newKeys
|
||||
if isBetter(currentBest[:], newKey.id[:]) || len(currentBest) == 0 {
|
||||
currentBest = newKey.id
|
||||
for _, channel := range threadChannels {
|
||||
select {
|
||||
case channel <- newKey.id:
|
||||
}
|
||||
}
|
||||
fmt.Println("--------------------------------------------------------------------------------")
|
||||
switch {
|
||||
case *doSig:
|
||||
fmt.Println("sigPriv:", hex.EncodeToString(newKey.priv[:]))
|
||||
fmt.Println("sigPub:", hex.EncodeToString(newKey.pub[:]))
|
||||
fmt.Println("TreeID:", hex.EncodeToString(newKey.id[:]))
|
||||
default:
|
||||
fmt.Println("boxPriv:", hex.EncodeToString(newKey.priv[:]))
|
||||
fmt.Println("boxPub:", hex.EncodeToString(newKey.pub[:]))
|
||||
fmt.Println("NodeID:", hex.EncodeToString(newKey.id[:]))
|
||||
fmt.Println("IP:", newKey.ip)
|
||||
}
|
||||
if isBetter(currentBest, newKey.pub) || len(currentBest) == 0 {
|
||||
currentBest = newKey.pub
|
||||
fmt.Println("-----")
|
||||
fmt.Println("Priv:", hex.EncodeToString(newKey.priv))
|
||||
fmt.Println("Pub:", hex.EncodeToString(newKey.pub))
|
||||
addr := address.AddrForKey(newKey.pub)
|
||||
fmt.Println("IP:", net.IP(addr[:]).String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isBetter(oldID, newID []byte) bool {
|
||||
for idx := range oldID {
|
||||
if newID[idx] > oldID[idx] {
|
||||
func isBetter(oldPub, newPub ed25519.PublicKey) bool {
|
||||
for idx := range oldPub {
|
||||
if newPub[idx] < oldPub[idx] {
|
||||
return true
|
||||
}
|
||||
if newID[idx] < oldID[idx] {
|
||||
return false
|
||||
if newPub[idx] > oldPub[idx] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func doBoxKeys(out chan<- keySet, in <-chan []byte) {
|
||||
var bestID crypto.NodeID
|
||||
for {
|
||||
select {
|
||||
case newBestID := <-in:
|
||||
if isBetter(bestID[:], newBestID) {
|
||||
copy(bestID[:], newBestID)
|
||||
}
|
||||
default:
|
||||
pub, priv := crypto.NewBoxKeys()
|
||||
id := crypto.GetNodeID(pub)
|
||||
if !isBetter(bestID[:], id[:]) {
|
||||
continue
|
||||
}
|
||||
bestID = *id
|
||||
ip := net.IP(address.AddrForNodeID(id)[:]).String()
|
||||
out <- keySet{priv[:], pub[:], id[:], ip}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doSigKeys(out chan<- keySet, in <-chan []byte) {
|
||||
var bestID crypto.TreeID
|
||||
for idx := range bestID {
|
||||
bestID[idx] = 0
|
||||
func doKeys(out chan<- keySet) {
|
||||
bestKey := make(ed25519.PublicKey, ed25519.PublicKeySize)
|
||||
for idx := range bestKey {
|
||||
bestKey[idx] = 0xff
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case newBestID := <-in:
|
||||
if isBetter(bestID[:], newBestID) {
|
||||
copy(bestID[:], newBestID)
|
||||
pub, priv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
default:
|
||||
}
|
||||
pub, priv := crypto.NewSigKeys()
|
||||
id := crypto.GetTreeID(pub)
|
||||
if !isBetter(bestID[:], id[:]) {
|
||||
if !isBetter(bestKey, pub) {
|
||||
continue
|
||||
}
|
||||
bestID = *id
|
||||
out <- keySet{priv[:], pub[:], id[:], ""}
|
||||
bestKey = pub
|
||||
out <- keySet{priv, pub}
|
||||
}
|
||||
}
|
||||
|
|
|
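Editor's note: the stand-alone sketch below is not part of the diff. It shows the key-mining idea used by the rewritten key generator above: repeatedly generate ed25519 keys and keep the one whose public key sorts lexicographically lowest, which is exactly what the new isBetter comparison selects for. The bounded loop count and variable names are invented for the example.

    package main

    import (
    	"bytes"
    	"crypto/ed25519"
    	"encoding/hex"
    	"fmt"
    )

    // isBetter mirrors the comparison above: a candidate public key is
    // "better" when it sorts lexicographically lower than the current best.
    func isBetter(oldPub, newPub ed25519.PublicKey) bool {
    	return bytes.Compare(newPub, oldPub) < 0
    }

    func main() {
    	// Start from the worst possible key (all 0xff), as doKeys does.
    	best := ed25519.PublicKey(bytes.Repeat([]byte{0xff}, ed25519.PublicKeySize))
    	var bestPriv ed25519.PrivateKey
    	for i := 0; i < 100000; i++ { // bounded search instead of an endless loop
    		pub, priv, err := ed25519.GenerateKey(nil)
    		if err != nil {
    			panic(err)
    		}
    		if isBetter(best, pub) {
    			best, bestPriv = pub, priv
    		}
    	}
    	fmt.Println("Pub: ", hex.EncodeToString(best))
    	fmt.Println("Priv:", hex.EncodeToString(bestPriv))
    }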
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
|
@ -24,17 +25,21 @@ import (
|
|||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
<<<<<<< HEAD
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/mdns"
|
||||
=======
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
>>>>>>> future
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/module"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/multicast"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/tuntap"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
)
|
||||
|
||||
type node struct {
|
||||
core yggdrasil.Core
|
||||
core core.Core
|
||||
state *config.NodeState
|
||||
tuntap module.Module // tuntap.TunAdapter
|
||||
multicast module.Module // multicast.Multicast
|
||||
|
@ -62,8 +67,8 @@ func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config
|
|||
// throwing everywhere when it's converting things into UTF-16 for the hell
|
||||
// of it - remove it and decode back down into UTF-8. This is necessary
|
||||
// because hjson doesn't know what to do with UTF-16 and will panic
|
||||
if bytes.Compare(conf[0:2], []byte{0xFF, 0xFE}) == 0 ||
|
||||
bytes.Compare(conf[0:2], []byte{0xFE, 0xFF}) == 0 {
|
||||
if bytes.Equal(conf[0:2], []byte{0xFF, 0xFE}) ||
|
||||
bytes.Equal(conf[0:2], []byte{0xFE, 0xFF}) {
|
||||
utf := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)
|
||||
decoder := utf.NewDecoder()
|
||||
conf, err = decoder.Bytes(conf)
|
||||
|
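Editor's note: a minimal, hypothetical helper (not part of the diff) showing the same UTF-16 handling as readConfig above: if the file begins with a byte-order mark, transcode it to UTF-8 with golang.org/x/text before handing it to the HJSON parser. The function name readConfigBytes and the example path are invented.

    package main

    import (
    	"bytes"
    	"fmt"
    	"io/ioutil"

    	"golang.org/x/text/encoding/unicode"
    )

    // readConfigBytes returns the config file as UTF-8, decoding it first if a
    // UTF-16 byte-order mark is present (hjson cannot parse UTF-16 directly).
    func readConfigBytes(path string) ([]byte, error) {
    	conf, err := ioutil.ReadFile(path)
    	if err != nil {
    		return nil, err
    	}
    	if len(conf) >= 2 && (bytes.Equal(conf[0:2], []byte{0xFF, 0xFE}) ||
    		bytes.Equal(conf[0:2], []byte{0xFE, 0xFF})) {
    		utf := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)
    		return utf.NewDecoder().Bytes(conf)
    	}
    	return conf, nil
    }

    func main() {
    	conf, err := readConfigBytes("/etc/yggdrasil.conf")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(conf), "bytes of UTF-8 config")
    }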
@ -221,25 +226,23 @@ func main() {
|
|||
return
|
||||
}
|
||||
// Have we been asked for the node address yet? If so, print it and then stop.
|
||||
getNodeID := func() *crypto.NodeID {
|
||||
if pubkey, err := hex.DecodeString(cfg.EncryptionPublicKey); err == nil {
|
||||
var box crypto.BoxPubKey
|
||||
copy(box[:], pubkey[:])
|
||||
return crypto.GetNodeID(&box)
|
||||
getNodeKey := func() ed25519.PublicKey {
|
||||
if pubkey, err := hex.DecodeString(cfg.PublicKey); err == nil {
|
||||
return ed25519.PublicKey(pubkey)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
switch {
|
||||
case *getaddr:
|
||||
if nodeid := getNodeID(); nodeid != nil {
|
||||
addr := *address.AddrForNodeID(nodeid)
|
||||
if key := getNodeKey(); key != nil {
|
||||
addr := address.AddrForKey(key)
|
||||
ip := net.IP(addr[:])
|
||||
fmt.Println(ip.String())
|
||||
}
|
||||
return
|
||||
case *getsnet:
|
||||
if nodeid := getNodeID(); nodeid != nil {
|
||||
snet := *address.SubnetForNodeID(nodeid)
|
||||
if key := getNodeKey(); key != nil {
|
||||
snet := address.SubnetForKey(key)
|
||||
ipnet := net.IPNet{
|
||||
IP: append(snet[:], 0, 0, 0, 0, 0, 0, 0, 0),
|
||||
Mask: net.CIDRMask(len(snet)*8, 128),
|
||||
|
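Editor's note: a small usage sketch (not part of the diff), assuming the src/address API exactly as it is called in the hunk above: derive a node's IPv6 address and routed subnet from an ed25519 public key. A freshly generated key stands in for cfg.PublicKey here.

    package main

    import (
    	"crypto/ed25519"
    	"encoding/hex"
    	"fmt"
    	"net"

    	"github.com/yggdrasil-network/yggdrasil-go/src/address"
    )

    func main() {
    	// Placeholder key: in yggdrasil this comes from cfg.PublicKey.
    	pub, _, err := ed25519.GenerateKey(nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("PublicKey:", hex.EncodeToString(pub))

    	// Node address (a /128 within the Yggdrasil prefix).
    	addr := address.AddrForKey(pub)
    	fmt.Println("Address:", net.IP(addr[:]).String())

    	// Routed subnet: pad the 8-byte prefix out to a full 16-byte IP.
    	snet := address.SubnetForKey(pub)
    	ipnet := net.IPNet{
    		IP:   append(snet[:], 0, 0, 0, 0, 0, 0, 0, 0),
    		Mask: net.CIDRMask(len(snet)*8, 128),
    	}
    	fmt.Println("Subnet:", ipnet.String())
    }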
@ -281,12 +284,12 @@ func main() {
|
|||
panic(err)
|
||||
}
|
||||
// Register the session firewall gatekeeper function
|
||||
n.core.SetSessionGatekeeper(n.sessionFirewall)
|
||||
// Allocate our modules
|
||||
n.admin = &admin.AdminSocket{}
|
||||
n.multicast = &multicast.Multicast{}
|
||||
n.mdns = &mdns.MDNS{}
|
||||
n.tuntap = &tuntap.TunAdapter{}
|
||||
n.tuntap.(*tuntap.TunAdapter).SetSessionGatekeeper(n.sessionFirewall)
|
||||
// Start the admin socket
|
||||
n.admin.Init(&n.core, n.state, logger, nil)
|
||||
if err := n.admin.Start(); err != nil {
|
||||
|
@ -306,19 +309,11 @@ func main() {
|
|||
}
|
||||
n.mdns.SetupAdminHandlers(n.admin.(*admin.AdminSocket))
|
||||
// Start the TUN/TAP interface
|
||||
if listener, err := n.core.ConnListen(); err == nil {
|
||||
if dialer, err := n.core.ConnDialer(); err == nil {
|
||||
n.tuntap.Init(&n.core, n.state, logger, tuntap.TunOptions{Listener: listener, Dialer: dialer})
|
||||
n.tuntap.Init(&n.core, n.state, logger, nil)
|
||||
if err := n.tuntap.Start(); err != nil {
|
||||
logger.Errorln("An error occurred starting TUN/TAP:", err)
|
||||
}
|
||||
n.tuntap.SetupAdminHandlers(n.admin.(*admin.AdminSocket))
|
||||
} else {
|
||||
logger.Errorln("Unable to get Dialer:", err)
|
||||
}
|
||||
} else {
|
||||
logger.Errorln("Unable to get Listener:", err)
|
||||
}
|
||||
// Make some nice output that tells us what our IPv6 address and subnet are.
|
||||
// This is just logged to stdout for the user.
|
||||
address := n.core.Address()
|
||||
|
@ -327,11 +322,11 @@ func main() {
|
|||
logger.Infof("Your IPv6 subnet is %s", subnet.String())
|
||||
// Catch interrupts from the operating system to exit gracefully.
|
||||
c := make(chan os.Signal, 1)
|
||||
r := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
signal.Notify(r, os.Interrupt, syscall.SIGHUP)
|
||||
// Capture the service being stopped on Windows.
|
||||
<-c
|
||||
minwinsvc.SetOnExit(n.shutdown)
|
||||
<<<<<<< HEAD
|
||||
defer n.shutdown()
|
||||
// Wait for the terminate/interrupt signal. Once a signal is received, the
|
||||
// deferred Stop function above will run which will shut down TUN/TAP.
|
||||
|
@ -353,6 +348,9 @@ func main() {
|
|||
}
|
||||
}
|
||||
exit:
|
||||
=======
|
||||
n.shutdown()
|
||||
>>>>>>> future
|
||||
}
|
||||
|
||||
func (n *node) shutdown() {
|
||||
|
@ -362,7 +360,7 @@ func (n *node) shutdown() {
|
|||
n.core.Stop()
|
||||
}
|
||||
|
||||
func (n *node) sessionFirewall(pubkey *crypto.BoxPubKey, initiator bool) bool {
|
||||
func (n *node) sessionFirewall(pubkey ed25519.PublicKey, initiator bool) bool {
|
||||
n.state.Mutex.RLock()
|
||||
defer n.state.Mutex.RUnlock()
|
||||
|
||||
|
@ -371,25 +369,21 @@ func (n *node) sessionFirewall(pubkey *crypto.BoxPubKey, initiator bool) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// Prepare for checking whitelist/blacklist
|
||||
var box crypto.BoxPubKey
|
||||
// Reject blacklisted nodes
|
||||
for _, b := range n.state.Current.SessionFirewall.BlacklistEncryptionPublicKeys {
|
||||
for _, b := range n.state.Current.SessionFirewall.BlacklistPublicKeys {
|
||||
key, err := hex.DecodeString(b)
|
||||
if err == nil {
|
||||
copy(box[:crypto.BoxPubKeyLen], key)
|
||||
if box == *pubkey {
|
||||
if bytes.Equal(key, pubkey) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow whitelisted nodes
|
||||
for _, b := range n.state.Current.SessionFirewall.WhitelistEncryptionPublicKeys {
|
||||
for _, b := range n.state.Current.SessionFirewall.WhitelistPublicKeys {
|
||||
key, err := hex.DecodeString(b)
|
||||
if err == nil {
|
||||
copy(box[:crypto.BoxPubKeyLen], key)
|
||||
if box == *pubkey {
|
||||
if bytes.Equal(key, pubkey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -405,7 +399,7 @@ func (n *node) sessionFirewall(pubkey *crypto.BoxPubKey, initiator bool) bool {
|
|||
// Look and see if the pubkey is that of a direct peer
|
||||
var isDirectPeer bool
|
||||
for _, peer := range n.core.GetPeers() {
|
||||
if peer.PublicKey == *pubkey {
|
||||
if bytes.Equal(peer.Key[:], pubkey[:]) {
|
||||
isDirectPeer = true
|
||||
break
|
||||
}
|
||||
|
|
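Editor's note: a minimal, hypothetical helper illustrating the allow/deny matching that sessionFirewall performs above: hex-decode each configured key and compare it to the session's ed25519 public key with bytes.Equal, skipping entries that fail to decode. keyInList is an invented name, not part of the codebase.

    package main

    import (
    	"bytes"
    	"crypto/ed25519"
    	"encoding/hex"
    	"fmt"
    )

    // keyInList reports whether pubkey matches any hex-encoded key in list.
    // Entries that fail to decode are skipped, mirroring the err == nil check above.
    func keyInList(pubkey ed25519.PublicKey, list []string) bool {
    	for _, entry := range list {
    		key, err := hex.DecodeString(entry)
    		if err == nil && bytes.Equal(key, pubkey) {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	pub, _, _ := ed25519.GenerateKey(nil)
    	allow := []string{hex.EncodeToString(pub)}
    	deny := []string{"not-a-valid-hex-key"}
    	fmt.Println("whitelisted:", keyInList(pub, allow)) // true
    	fmt.Println("blacklisted:", keyInList(pub, deny))  // false
    }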
|
@ -78,8 +78,8 @@ func run() int {
|
|||
|
||||
if *server == endpoint {
|
||||
if config, err := ioutil.ReadFile(defaults.GetDefaults().DefaultConfigFile); err == nil {
|
||||
if bytes.Compare(config[0:2], []byte{0xFF, 0xFE}) == 0 ||
|
||||
bytes.Compare(config[0:2], []byte{0xFE, 0xFF}) == 0 {
|
||||
if bytes.Equal(config[0:2], []byte{0xFF, 0xFE}) ||
|
||||
bytes.Equal(config[0:2], []byte{0xFE, 0xFF}) {
|
||||
utf := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)
|
||||
decoder := utf.NewDecoder()
|
||||
config, err = decoder.Bytes(config)
|
||||
|
|
|
@ -6,6 +6,7 @@ This file generates crypto keys for [ansible-yggdrasil](https://github.com/jcgru
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
@ -14,7 +15,6 @@ import (
|
|||
|
||||
"github.com/cheggaaa/pb/v3"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
)
|
||||
|
||||
var numHosts = flag.Int("hosts", 1, "number of host vars to generate")
|
||||
|
@ -23,7 +23,6 @@ var keyTries = flag.Int("tries", 1000, "number of tries before taking the best k
|
|||
type keySet struct {
|
||||
priv []byte
|
||||
pub []byte
|
||||
id []byte
|
||||
ip string
|
||||
}
|
||||
|
||||
|
@ -37,27 +36,15 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
var encryptionKeys []keySet
|
||||
var keys []keySet
|
||||
for i := 0; i < *numHosts+1; i++ {
|
||||
encryptionKeys = append(encryptionKeys, newBoxKey())
|
||||
keys = append(keys, newKey())
|
||||
bar.Increment()
|
||||
}
|
||||
encryptionKeys = sortKeySetArray(encryptionKeys)
|
||||
keys = sortKeySetArray(keys)
|
||||
for i := 0; i < *keyTries-*numHosts-1; i++ {
|
||||
encryptionKeys[0] = newBoxKey()
|
||||
encryptionKeys = bubbleUpTo(encryptionKeys, 0)
|
||||
bar.Increment()
|
||||
}
|
||||
|
||||
var signatureKeys []keySet
|
||||
for i := 0; i < *numHosts+1; i++ {
|
||||
signatureKeys = append(signatureKeys, newSigKey())
|
||||
bar.Increment()
|
||||
}
|
||||
signatureKeys = sortKeySetArray(signatureKeys)
|
||||
for i := 0; i < *keyTries-*numHosts-1; i++ {
|
||||
signatureKeys[0] = newSigKey()
|
||||
signatureKeys = bubbleUpTo(signatureKeys, 0)
|
||||
keys[0] = newKey()
|
||||
keys = bubbleUpTo(keys, 0)
|
||||
bar.Increment()
|
||||
}
|
||||
|
||||
|
@ -70,43 +57,36 @@ func main() {
|
|||
return
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(fmt.Sprintf("yggdrasil_encryption_public_key: %v\n", hex.EncodeToString(encryptionKeys[i].pub)))
|
||||
file.WriteString("yggdrasil_encryption_private_key: \"{{ vault_yggdrasil_encryption_private_key }}\"\n")
|
||||
file.WriteString(fmt.Sprintf("yggdrasil_signing_public_key: %v\n", hex.EncodeToString(signatureKeys[i].pub)))
|
||||
file.WriteString("yggdrasil_signing_private_key: \"{{ vault_yggdrasil_signing_private_key }}\"\n")
|
||||
file.WriteString(fmt.Sprintf("ansible_host: %v\n", encryptionKeys[i].ip))
|
||||
file.WriteString(fmt.Sprintf("yggdrasil_public_key: %v\n", hex.EncodeToString(keys[i].pub)))
|
||||
file.WriteString("yggdrasil_private_key: \"{{ vault_yggdrasil_private_key }}\"\n")
|
||||
file.WriteString(fmt.Sprintf("ansible_host: %v\n", keys[i].ip))
|
||||
|
||||
file, err = os.Create(fmt.Sprintf("host_vars/%x/vault", i))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(fmt.Sprintf("vault_yggdrasil_encryption_private_key: %v\n", hex.EncodeToString(encryptionKeys[i].priv)))
|
||||
file.WriteString(fmt.Sprintf("vault_yggdrasil_signing_private_key: %v\n", hex.EncodeToString(signatureKeys[i].priv)))
|
||||
file.WriteString(fmt.Sprintf("vault_yggdrasil_private_key: %v\n", hex.EncodeToString(keys[i].priv)))
|
||||
bar.Increment()
|
||||
}
|
||||
bar.Finish()
|
||||
}
|
||||
|
||||
func newBoxKey() keySet {
|
||||
pub, priv := crypto.NewBoxKeys()
|
||||
id := crypto.GetNodeID(pub)
|
||||
ip := net.IP(address.AddrForNodeID(id)[:]).String()
|
||||
return keySet{priv[:], pub[:], id[:], ip}
|
||||
}
|
||||
|
||||
func newSigKey() keySet {
|
||||
pub, priv := crypto.NewSigKeys()
|
||||
id := crypto.GetTreeID(pub)
|
||||
return keySet{priv[:], pub[:], id[:], ""}
|
||||
func newKey() keySet {
|
||||
pub, priv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ip := net.IP(address.AddrForKey(pub)[:]).String()
|
||||
return keySet{priv[:], pub[:], ip}
|
||||
}
|
||||
|
||||
func isBetter(oldID, newID []byte) bool {
|
||||
for idx := range oldID {
|
||||
if newID[idx] > oldID[idx] {
|
||||
if newID[idx] < oldID[idx] {
|
||||
return true
|
||||
}
|
||||
if newID[idx] < oldID[idx] {
|
||||
if newID[idx] > oldID[idx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -122,7 +102,7 @@ func sortKeySetArray(sets []keySet) []keySet {
|
|||
|
||||
func bubbleUpTo(sets []keySet, num int) []keySet {
|
||||
for i := 0; i < len(sets)-num-1; i++ {
|
||||
if isBetter(sets[i+1].id, sets[i].id) {
|
||||
if isBetter(sets[i+1].pub, sets[i].pub) {
|
||||
var tmp = sets[i]
|
||||
sets[i] = sets[i+1]
|
||||
sets[i+1] = tmp
|
||||
|
|
|
@ -1,25 +1,17 @@
|
|||
# Last Modified: Tue Mar 10 16:38:14 2020
|
||||
# Last Modified: Fri Oct 30 11:33:31 2020
|
||||
#include <tunables/global>
|
||||
|
||||
/usr/bin/yggdrasil {
|
||||
#include <abstractions/base>
|
||||
#include <abstractions/nameservice>
|
||||
|
||||
capability net_admin,
|
||||
capability net_raw,
|
||||
|
||||
network inet stream,
|
||||
network inet dgram,
|
||||
network inet6 dgram,
|
||||
network inet6 stream,
|
||||
network netlink raw,
|
||||
|
||||
/lib/@{multiarch}/ld-*.so mr,
|
||||
/proc/sys/net/core/somaxconn r,
|
||||
owner /sys/kernel/mm/transparent_hugepage/hpage_pmd_size r,
|
||||
/dev/net/tun rw,
|
||||
/proc/sys/net/core/somaxconn r,
|
||||
/sys/kernel/mm/transparent_hugepage/hpage_pmd_size r,
|
||||
|
||||
/usr/bin/yggdrasil mr,
|
||||
/etc/yggdrasil.conf rw,
|
||||
/run/yggdrasil.sock rw,
|
||||
|
||||
}
|
||||
|
|
|
@ -1,97 +0,0 @@
|
|||
package main
|
||||
|
||||
/*
|
||||
This is a small utility that is designed to accompany the vyatta-yggdrasil
|
||||
package. It takes a HJSON configuration file, makes changes to it based on
|
||||
the command line arguments, and then spits out an updated file.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
|
||||
"github.com/hjson/hjson-go"
|
||||
"golang.org/x/text/encoding/unicode"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
)
|
||||
|
||||
type nodeConfig = config.NodeConfig
|
||||
|
||||
func main() {
|
||||
useconffile := flag.String("useconffile", "/etc/yggdrasil.conf", "update config at specified file path")
|
||||
flag.Parse()
|
||||
cfg := nodeConfig{}
|
||||
var config []byte
|
||||
var err error
|
||||
config, err = ioutil.ReadFile(*useconffile)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if bytes.Compare(config[0:2], []byte{0xFF, 0xFE}) == 0 ||
|
||||
bytes.Compare(config[0:2], []byte{0xFE, 0xFF}) == 0 {
|
||||
utf := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)
|
||||
decoder := utf.NewDecoder()
|
||||
config, err = decoder.Bytes(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
var dat map[string]interface{}
|
||||
if err := hjson.Unmarshal(config, &dat); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
confJson, err := json.Marshal(dat)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
json.Unmarshal(confJson, &cfg)
|
||||
switch flag.Arg(0) {
|
||||
case "setMTU":
|
||||
cfg.IfMTU, err = strconv.Atoi(flag.Arg(1))
|
||||
if err != nil {
|
||||
cfg.IfMTU = 1280
|
||||
}
|
||||
if mtu, _ := strconv.Atoi(flag.Arg(1)); mtu < 1280 {
|
||||
cfg.IfMTU = 1280
|
||||
}
|
||||
case "setIfName":
|
||||
cfg.IfName = flag.Arg(1)
|
||||
case "setListen":
|
||||
cfg.Listen = flag.Arg(1)
|
||||
case "setAdminListen":
|
||||
cfg.AdminListen = flag.Arg(1)
|
||||
case "setIfTapMode":
|
||||
if flag.Arg(1) == "true" {
|
||||
cfg.IfTAPMode = true
|
||||
} else {
|
||||
cfg.IfTAPMode = false
|
||||
}
|
||||
case "addPeer":
|
||||
found := false
|
||||
for _, v := range cfg.Peers {
|
||||
if v == flag.Arg(1) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
cfg.Peers = append(cfg.Peers, flag.Arg(1))
|
||||
}
|
||||
case "removePeer":
|
||||
for k, v := range cfg.Peers {
|
||||
if v == flag.Arg(1) {
|
||||
cfg.Peers = append(cfg.Peers[:k], cfg.Peers[k+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
bs, err := hjson.Marshal(cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(string(bs))
|
||||
return
|
||||
}
|
|
@ -83,7 +83,7 @@ then
|
|||
echo "Backing up configuration file to /var/backups/yggdrasil.conf.`date +%Y%m%d`"
|
||||
cp /etc/yggdrasil.conf /var/backups/yggdrasil.conf.`date +%Y%m%d`
|
||||
echo "Normalising and updating /etc/yggdrasil.conf"
|
||||
/usr/bin/yggdrasil -useconffile /var/backups/yggdrasil.conf.`date +%Y%m%d` -normaliseconf > /etc/yggdrasil.conf
|
||||
/usr/bin/yggdrasil -useconf -normaliseconf < /var/backups/yggdrasil.conf.`date +%Y%m%d` > /etc/yggdrasil.conf
|
||||
chgrp yggdrasil /etc/yggdrasil.conf
|
||||
|
||||
if command -v systemctl >/dev/null; then
|
||||
|
@ -94,7 +94,7 @@ then
|
|||
else
|
||||
echo "Generating initial configuration file /etc/yggdrasil.conf"
|
||||
echo "Please familiarise yourself with this file before starting Yggdrasil"
|
||||
/usr/bin/yggdrasil -genconf > /etc/yggdrasil.conf
|
||||
sh -c 'umask 0027 && /usr/bin/yggdrasil -genconf > /etc/yggdrasil.conf'
|
||||
chgrp yggdrasil /etc/yggdrasil.conf
|
||||
fi
|
||||
EOF
|
||||
|
|
|
@ -8,7 +8,6 @@ ENV CGO_ENABLED=0
|
|||
RUN apk add git && ./build && go build -o /src/genkeys cmd/genkeys/main.go
|
||||
|
||||
FROM docker.io/alpine
|
||||
LABEL maintainer="Christer Waren/CWINFO <christer.waren@cwinfo.org>"
|
||||
|
||||
COPY --from=builder /src/yggdrasil /usr/bin/yggdrasil
|
||||
COPY --from=builder /src/yggdrasilctl /usr/bin/yggdrasilctl
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
#!/bin/sh
|
||||
#!/bin/bash
|
||||
|
||||
# This script generates an MSI file for Yggdrasil for a given architecture. It
|
||||
# needs to run on Windows within MSYS2 and Go 1.13 or later must be installed on
|
||||
# the system and within the PATH. This is ran currently by Appveyor (see
|
||||
# appveyor.yml in the repository root) for both x86 and x64.
|
||||
# needs to run on Linux or macOS with Go 1.16, wixl and msitools installed.
|
||||
#
|
||||
# Author: Neil Alexander <neilalexander@users.noreply.github.com>
|
||||
|
||||
|
@ -11,7 +9,7 @@
|
|||
PKGARCH=$1
|
||||
if [ "${PKGARCH}" == "" ];
|
||||
then
|
||||
echo "tell me the architecture: x86 or x64"
|
||||
echo "tell me the architecture: x86, x64 or arm"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
@ -28,28 +26,11 @@ then
|
|||
git checkout ${APPVEYOR_REPO_BRANCH}
|
||||
fi
|
||||
|
||||
# Install prerequisites within MSYS2
|
||||
pacman -S --needed --noconfirm unzip git curl
|
||||
|
||||
# Download the wix tools!
|
||||
if [ ! -d wixbin ];
|
||||
then
|
||||
curl -LO https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip
|
||||
if [ `md5sum wix311-binaries.zip | cut -f 1 -d " "` != "47a506f8ab6666ee3cc502fb07d0ee2a" ];
|
||||
then
|
||||
echo "wix package didn't match expected checksum"
|
||||
exit 1
|
||||
fi
|
||||
mkdir -p wixbin
|
||||
unzip -o wix311-binaries.zip -d wixbin || (
|
||||
echo "failed to unzip WiX"
|
||||
exit 1
|
||||
)
|
||||
fi
|
||||
|
||||
# Build Yggdrasil!
|
||||
[ "${PKGARCH}" == "x64" ] && GOOS=windows GOARCH=amd64 CGO_ENABLED=0 ./build
|
||||
[ "${PKGARCH}" == "x86" ] && GOOS=windows GOARCH=386 CGO_ENABLED=0 ./build
|
||||
[ "${PKGARCH}" == "x64" ] && GOOS=windows GOARCH=amd64 CGO_ENABLED=0 ./build -p -l "-aslr"
|
||||
[ "${PKGARCH}" == "x86" ] && GOOS=windows GOARCH=386 CGO_ENABLED=0 ./build -p -l "-aslr"
|
||||
[ "${PKGARCH}" == "arm" ] && GOOS=windows GOARCH=arm CGO_ENABLED=0 ./build -p -l "-aslr"
|
||||
#[ "${PKGARCH}" == "arm64" ] && GOOS=windows GOARCH=arm64 CGO_ENABLED=0 ./build
|
||||
|
||||
# Create the postinstall script
|
||||
cat > updateconfig.bat << EOF
|
||||
|
@ -58,8 +39,10 @@ if not exist %ALLUSERSPROFILE%\\Yggdrasil (
|
|||
)
|
||||
if not exist %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf (
|
||||
if exist yggdrasil.exe (
|
||||
if not exist %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf (
|
||||
yggdrasil.exe -genconf > %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf
|
||||
)
|
||||
)
|
||||
)
|
||||
EOF
|
||||
|
||||
|
@ -72,12 +55,16 @@ PKGVERSIONMS=$(echo $PKGVERSION | tr - .)
|
|||
PKGGUID="54a3294e-a441-4322-aefb-3bb40dd022bb" PKGINSTFOLDER="ProgramFilesFolder"
|
||||
|
||||
# Download the Wintun driver
|
||||
curl -o wintun.zip https://www.wintun.net/builds/wintun-0.10.2.zip
|
||||
unzip wintun.zip
|
||||
if [ $PKGARCH = "x64" ]; then
|
||||
PKGMSMNAME=wintun-x64.msm
|
||||
curl -o ${PKGMSMNAME} https://www.wintun.net/builds/wintun-amd64-0.7.msm || (echo "couldn't get wintun"; exit 1)
|
||||
PKGWINTUNDLL=wintun/bin/amd64/wintun.dll
|
||||
elif [ $PKGARCH = "x86" ]; then
|
||||
PKGMSMNAME=wintun-x86.msm
|
||||
curl -o ${PKGMSMNAME} https://www.wintun.net/builds/wintun-x86-0.7.msm || (echo "couldn't get wintun"; exit 1)
|
||||
PKGWINTUNDLL=wintun/bin/x86/wintun.dll
|
||||
elif [ $PKGARCH = "arm" ]; then
|
||||
PKGWINTUNDLL=wintun/bin/arm/wintun.dll
|
||||
#elif [ $PKGARCH = "arm64" ]; then
|
||||
# PKGWINTUNDLL=wintun/bin/arm64/wintun.dll
|
||||
else
|
||||
echo "wasn't sure which architecture to get wintun for"
|
||||
exit 1
|
||||
|
@ -100,6 +87,7 @@ cat > wix.xml << EOF
|
|||
Language="1033"
|
||||
Codepage="1252"
|
||||
Version="${PKGVERSIONMS}"
|
||||
Platform="${PKGARCH}"
|
||||
Manufacturer="github.com/yggdrasil-network">
|
||||
|
||||
<Package
|
||||
|
@ -136,6 +124,12 @@ cat > wix.xml << EOF
|
|||
Source="yggdrasil.exe"
|
||||
KeyPath="yes" />
|
||||
|
||||
<File
|
||||
Id="Wintun"
|
||||
Name="wintun.dll"
|
||||
DiskId="1"
|
||||
Source="${PKGWINTUNDLL}" />
|
||||
|
||||
<ServiceInstall
|
||||
Id="ServiceInstaller"
|
||||
Account="LocalSystem"
|
||||
|
@ -176,12 +170,6 @@ cat > wix.xml << EOF
|
|||
</Component>
|
||||
</Directory>
|
||||
</Directory>
|
||||
|
||||
<Merge
|
||||
Id="Wintun"
|
||||
Language="0"
|
||||
DiskId="1"
|
||||
SourceFile="${PKGMSMNAME}" />
|
||||
</Directory>
|
||||
|
||||
<Feature Id="YggdrasilFeature" Title="Yggdrasil" Level="1">
|
||||
|
@ -190,13 +178,6 @@ cat > wix.xml << EOF
|
|||
<ComponentRef Id="ConfigScript" />
|
||||
</Feature>
|
||||
|
||||
<Feature Id="WintunFeature" Title="Wintun" Level="1">
|
||||
<Condition Level="0">
|
||||
UPGRADINGPRODUCTCODE
|
||||
</Condition>
|
||||
<MergeRef Id="Wintun" />
|
||||
</Feature>
|
||||
|
||||
<CustomAction
|
||||
Id="UpdateGenerateConfig"
|
||||
Directory="YggdrasilInstallFolder"
|
||||
|
@ -208,9 +189,7 @@ cat > wix.xml << EOF
|
|||
<InstallExecuteSequence>
|
||||
<Custom
|
||||
Action="UpdateGenerateConfig"
|
||||
Before="StartServices">
|
||||
NOT Installed AND NOT REMOVE
|
||||
</Custom>
|
||||
Before="StartServices" />
|
||||
</InstallExecuteSequence>
|
||||
|
||||
</Product>
|
||||
|
@ -218,7 +197,4 @@ cat > wix.xml << EOF
|
|||
EOF
|
||||
|
||||
# Generate the MSI
|
||||
CANDLEFLAGS="-nologo"
|
||||
LIGHTFLAGS="-nologo -spdb -sice:ICE71 -sice:ICE61"
|
||||
wixbin/candle $CANDLEFLAGS -out ${PKGNAME}-${PKGVERSION}-${PKGARCH}.wixobj -arch ${PKGARCH} wix.xml && \
|
||||
wixbin/light $LIGHTFLAGS -ext WixUtilExtension.dll -out ${PKGNAME}-${PKGVERSION}-${PKGARCH}.msi ${PKGNAME}-${PKGVERSION}-${PKGARCH}.wixobj
|
||||
wixl -v wix.xml -a ${PKGARCH} -o ${PKGNAME}-${PKGVERSION}-${PKGARCH}.msi
|
||||
|
|
36 go.mod
|
@ -1,24 +1,24 @@
|
|||
module github.com/yggdrasil-network/yggdrasil-go
|
||||
|
||||
go 1.13
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/Arceliar/phony v0.0.0-20191006174943-d0c68492aca0
|
||||
github.com/cheggaaa/pb/v3 v3.0.4
|
||||
github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8
|
||||
github.com/Arceliar/ironwood v0.0.0-20210531083357-daeea6bc386a
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
|
||||
github.com/cheggaaa/pb/v3 v3.0.6
|
||||
github.com/fatih/color v1.10.0 // indirect
|
||||
github.com/gologme/log v1.2.0
|
||||
github.com/hashicorp/go-syslog v1.0.0
|
||||
github.com/hjson/hjson-go v3.0.2-0.20200316202735-d5d0e8b0617d+incompatible
|
||||
github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0
|
||||
github.com/miekg/dns v1.1.27 // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2
|
||||
github.com/neilalexander/mdns v0.3.1-0.20200509205547-9b2aa0712e91
|
||||
github.com/vishvananda/netlink v1.0.0
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
|
||||
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de
|
||||
golang.zx2c4.com/wireguard v0.0.20200320
|
||||
golang.zx2c4.com/wireguard/windows v0.1.0
|
||||
github.com/hjson/hjson-go v3.1.0+incompatible
|
||||
github.com/kardianos/minwinsvc v1.0.0
|
||||
github.com/mattn/go-runewidth v0.0.10 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.1
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b
|
||||
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf
|
||||
golang.zx2c4.com/wireguard/windows v0.3.8
|
||||
)
|
||||
|
|
127 go.sum
|
@ -1,80 +1,75 @@
|
|||
github.com/Arceliar/phony v0.0.0-20191006174943-d0c68492aca0 h1:p3puK8Sl2xK+2FnnIvY/C0N1aqJo2kbEsdAzU+Tnv48=
|
||||
github.com/Arceliar/phony v0.0.0-20191006174943-d0c68492aca0/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
|
||||
github.com/Arceliar/ironwood v0.0.0-20210531083357-daeea6bc386a h1:szuwvAlzD3r7PEcv4d3tin0V/4ZS/YQKL8RmLAXbE3E=
|
||||
github.com/Arceliar/ironwood v0.0.0-20210531083357-daeea6bc386a/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ=
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
|
||||
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/cheggaaa/pb/v3 v3.0.4 h1:QZEPYOj2ix6d5oEg63fbHmpolrnNiwjUsk+h74Yt4bM=
|
||||
github.com/cheggaaa/pb/v3 v3.0.4/go.mod h1:7rgWxLrAUcFMkvJuv09+DYi7mMUYi8nO9iOWcvGJPfw=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/cheggaaa/pb/v3 v3.0.6 h1:ULPm1wpzvj60FvmCrX7bIaB80UgbhI+zSaQJKRfCbAs=
|
||||
github.com/cheggaaa/pb/v3 v3.0.6/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8 h1:WD8iJ37bRNwvETMfVTusVSAi0WdXTpfNVGY2aHycNKY=
|
||||
github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
|
||||
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
|
||||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
|
||||
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
|
||||
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hjson/hjson-go v3.0.2-0.20200316202735-d5d0e8b0617d+incompatible h1:v6BPcb9q9U6JDVsuizxBr/piVB/2Y1Q5GWoBybvZVWI=
|
||||
github.com/hjson/hjson-go v3.0.2-0.20200316202735-d5d0e8b0617d+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
|
||||
github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0 h1:YnZmFjg0Nvk8851WTVWlqMC1ecJH07Ctz+Ezxx4u54g=
|
||||
github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0/go.mod h1:rUi0/YffDo1oXBOGn1KRq7Fr07LX48XEBecQnmwjsAo=
|
||||
github.com/lxn/walk v0.0.0-20191128110447-55ccb3a9f5c1 h1:/QwQcwWVOQXcoNuV9tHx30gQ3q7jCE/rKcGjwzsa5tg=
|
||||
github.com/lxn/walk v0.0.0-20191128110447-55ccb3a9f5c1/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
||||
github.com/lxn/win v0.0.0-20191128105842-2da648fda5b4 h1:5BmtGkQbch91lglMHQ9JIDGiYCL3kBRBA0ItZTvOcEI=
|
||||
github.com/lxn/win v0.0.0-20191128105842-2da648fda5b4/go.mod h1:ouWl4wViUNh8tPSIwxTVMuS014WakR1hqvBc2I0bMoA=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/hjson/hjson-go v3.1.0+incompatible h1:DY/9yE8ey8Zv22bY+mHV1uk2yRy0h8tKhZ77hEdi0Aw=
|
||||
github.com/hjson/hjson-go v3.1.0+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
|
||||
github.com/kardianos/minwinsvc v1.0.0 h1:+JfAi8IBJna0jY2dJGZqi7o15z13JelFIklJCAENALA=
|
||||
github.com/kardianos/minwinsvc v1.0.0/go.mod h1:Bgd0oc+D0Qo3bBytmNtyRKVlp85dAloLKhfxanPFFRc=
|
||||
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
||||
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
|
||||
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/neilalexander/mdns v0.3.1-0.20200509205547-9b2aa0712e91 h1:8/a+BIe+29iQPVAjHai+kvVHsiIAzTEHpxyF+ns3qi0=
|
||||
github.com/neilalexander/mdns v0.3.1-0.20200509205547-9b2aa0712e91/go.mod h1:aJ6mgvEacXD+y9JGp8jMaF8wcsTuSvR4YMU6agygcPQ=
|
||||
github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
|
||||
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
|
||||
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200301040627-c5d0d7b4ec88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225014209-683adc9d29d7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305215415-5cdee2b1b5a0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b h1:ggRgirZABFolTmi3sn6Ivd9SipZwLedQ5wR0aAKnFxU=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de h1:aYKJLPSrddB2N7/6OKyFqJ337SXpo61bBuvO5p1+7iY=
|
||||
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c h1:SW/oilbeWd6f32u3ZvuYGqZ+wivcp//I3Dy/gByk7Wk=
|
||||
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wireguard v0.0.20200122-0.20200214175355-9cbcff10dd3e/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4=
|
||||
golang.zx2c4.com/wireguard v0.0.20200320 h1:1vE6zVeO7fix9cJX1Z9ZQ+ikPIIx7vIyU0o0tLDD88g=
|
||||
golang.zx2c4.com/wireguard v0.0.20200320/go.mod h1:lDian4Sw4poJ04SgHh35nzMVwGSYlPumkdnHcucAQoY=
|
||||
golang.zx2c4.com/wireguard/windows v0.1.0 h1:742izt2DAJBpIQT+DvrzN58P9p7fO4BUFOgMzY9qVhw=
|
||||
golang.zx2c4.com/wireguard/windows v0.1.0/go.mod h1:EK7CxrFnicmYJ0ZCF6crBh2/EMMeSxMlqgLlwN0Kv9s=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210225140808-70b7b7158fc9/go.mod h1:39ZQQ95hUxDxT7opsWy/rtfgvXXc8s30qfZ02df69Fo=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf h1:AtdIMfzvVNPXN4kVY/yWS8mvpQogSwtCRJk2y/LBPpg=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf/go.mod h1:ojGPy+9W6ZSM8anL+xC67fvh8zPQJwA6KpFOHyDWLX4=
|
||||
golang.zx2c4.com/wireguard/windows v0.3.8 h1:FvfBEhdZZTwthLuPHdyP6zpivYL3enopxd4XpggAufM=
|
||||
golang.zx2c4.com/wireguard/windows v0.3.8/go.mod h1:lm7dxHcBuzMNq706Ge1tZKZKw4+19vG9dLOhoDX05HQ=
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,62 +0,0 @@
|
|||
import glob
|
||||
import sys
|
||||
inputDirPath = sys.argv[1]
|
||||
|
||||
inputFilePaths = glob.glob(inputDirPath+"/*")
|
||||
inputFilePaths.sort()
|
||||
|
||||
merged = dict()
|
||||
|
||||
stretches = []
|
||||
|
||||
total = 0
|
||||
for inputFilePath in inputFilePaths:
|
||||
print "Processing file {}".format(inputFilePath)
|
||||
with open(inputFilePath, 'r') as f:
|
||||
inData = f.readlines()
|
||||
pathsChecked = 0.
|
||||
avgStretch = 0.
|
||||
for line in inData:
|
||||
dat = line.rstrip('\n').split(' ')
|
||||
eHops = int(dat[0])
|
||||
nHops = int(dat[1])
|
||||
count = int(dat[2])
|
||||
if eHops not in merged: merged[eHops] = dict()
|
||||
if nHops not in merged[eHops]: merged[eHops][nHops] = 0
|
||||
merged[eHops][nHops] += count
|
||||
total += count
|
||||
pathsChecked += count
|
||||
stretch = float(nHops)/eHops
|
||||
avgStretch += stretch*count
|
||||
finStretch = avgStretch / max(1, pathsChecked)
|
||||
stretches.append(str(finStretch))
|
||||
|
||||
hopsUsed = 0.
|
||||
hopsNeeded = 0.
|
||||
avgStretch = 0.
|
||||
results = []
|
||||
for eHops in sorted(merged.keys()):
|
||||
for nHops in sorted(merged[eHops].keys()):
|
||||
count = merged[eHops][nHops]
|
||||
result = "{} {} {}".format(eHops, nHops, count)
|
||||
results.append(result)
|
||||
hopsUsed += nHops*count
|
||||
hopsNeeded += eHops*count
|
||||
stretch = float(nHops)/eHops
|
||||
avgStretch += stretch*count
|
||||
print result
|
||||
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
|
||||
avgStretch /= max(1, total)
|
||||
|
||||
with open("results.txt", "w") as f:
|
||||
f.write('\n'.join(results))
|
||||
|
||||
with open("stretches.txt", "w") as f:
|
||||
f.write('\n'.join(stretches))
|
||||
|
||||
print "Total files processed: {}".format(len(inputFilePaths))
|
||||
print "Total paths found: {}".format(total)
|
||||
print "Bandwidth usage: {}".format(bandwidthUsage)
|
||||
print "Average stretch: {}".format(avgStretch)
|
||||
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
#!/bin/bash
|
||||
go run -tags debug misc/sim/treesim.go "$@"
|
|
@ -1,901 +0,0 @@
|
|||
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
|
||||
# Steps:
|
||||
# 1: Pick any node, here I'm using highest nodeID
|
||||
# 2: Build spanning tree, each node stores path back to root
|
||||
# Optionally with weights for each hop
|
||||
# Ties broken by preferring a parent with higher degree
|
||||
# 3: Distance metric: self->peer + (via tree) peer->dest
|
||||
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
|
||||
# 5: Source-route traffic using the better of those two paths
|
||||
|
||||
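Editor's note: the tiny Go illustration below is not from this repository; it is one reading of the distance metric in step 3 above. If each node is labelled by its coordinates (the path from the root), the hop count between two nodes via the tree is the sum of their depths minus twice the length of their common prefix. All names are invented for the example.

    package main

    import "fmt"

    // treeDist returns the number of hops between two nodes when routing via the
    // spanning tree: up from a to the deepest common ancestor, then down to b.
    func treeDist(a, b []int) int {
    	common := 0
    	for common < len(a) && common < len(b) && a[common] == b[common] {
    		common++
    	}
    	return (len(a) - common) + (len(b) - common)
    }

    func main() {
    	// Coords are the node IDs on the path from the root down to each node.
    	a := []int{9, 4, 7, 2}
    	b := []int{9, 4, 5}
    	fmt.Println(treeDist(a, b)) // 3: two hops up from a, one hop down to b
    }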
# Note: This makes no attempt to simulate a dynamic network
|
||||
# E.g. A node's peers cannot be disconnected
|
||||
|
||||
# TODO:
|
||||
# Make better use of drop?
|
||||
# In particular, we should be ignoring *all* recently dropped *paths* to the root
|
||||
# To minimize route flapping
|
||||
# Not really an issue in the sim, but probably needed for a real network
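#
# Illustrative note (not part of the original script): the greedy lookup in
# steps 3-5 scores each peer as roughly
#     score(peer, dest) = len(peer.path) - 1 + treeDist(peer.coords, dest.coords)
# i.e. the hops needed to reach that peer plus the tree distance from the peer
# to the destination, and forwards towards the lowest score. The actual
# helpers (treeDist, Node.lookup) are defined further down in this file.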
|
||||
|
||||
import array
|
||||
import gc
|
||||
import glob
|
||||
import gzip
|
||||
import heapq
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
|
||||
#############
|
||||
# Constants #
|
||||
#############
|
||||
|
||||
# Reminder of where link cost comes in
|
||||
LINK_COST = 1
|
||||
|
||||
# Timeout before dropping something, in simulated seconds
|
||||
TIMEOUT = 60
|
||||
|
||||
###########
|
||||
# Classes #
|
||||
###########
|
||||
|
||||
class PathInfo:
|
||||
def __init__(self, nodeID):
|
||||
self.nodeID = nodeID # e.g. IP
|
||||
self.coords = [] # Position in tree
|
||||
self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info
|
||||
self.degree = 0 # Number of peers the sender has, used to break ties
|
||||
# The above should be signed
|
||||
self.path = [nodeID] # Path to node (in path-vector route)
|
||||
self.time = 0 # Time info was updated, to keep track of e.g. timeouts
|
||||
self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
|
||||
def clone(self):
|
||||
# Return a deep-enough copy of the path
|
||||
clone = PathInfo(None)
|
||||
clone.nodeID = self.nodeID
|
||||
clone.coords = self.coords[:]
|
||||
clone.tstamp = self.tstamp
|
||||
clone.degree = self.degree
|
||||
clone.path = self.path[:]
|
||||
clone.time = self.time
|
||||
clone.treeID = self.treeID
|
||||
return clone
|
||||
# End class PathInfo
|
||||
|
||||
class Node:
|
||||
def __init__(self, nodeID):
|
||||
self.info = PathInfo(nodeID) # Self NodeInfo
|
||||
self.root = None # PathInfo to node at root of tree
|
||||
self.drop = dict() # PathInfo to nodes from clus that have timed out
|
||||
self.peers = dict() # PathInfo to peers
|
||||
self.links = dict() # Links to peers (to pass messages)
|
||||
self.msgs = [] # Said messages
|
||||
self.table = dict() # Pre-computed lookup table of peer info
|
||||
|
||||
def tick(self):
|
||||
# Do periodic maintenance stuff, including push updates
|
||||
self.info.time += 1
|
||||
if self.info.time > self.info.tstamp + TIMEOUT/4:
|
||||
# Update timestamp at least once every 1/4 timeout period
|
||||
# This should probably be randomized in a real implementation
|
||||
self.info.tstamp = self.info.time
|
||||
self.info.degree = 0  # TODO decide if degree should be used, len(self.peers)
|
||||
changed = False # Used to track when the network has converged
|
||||
changed |= self.cleanRoot()
|
||||
self.cleanDropped()
|
||||
# Should probably send messages infrequently if there's nothing new to report
|
||||
if self.info.tstamp == self.info.time:
|
||||
msg = self.createMessage()
|
||||
self.sendMessage(msg)
|
||||
return changed
|
||||
|
||||
def cleanRoot(self):
|
||||
changed = False
|
||||
if self.root and self.info.time - self.root.time > TIMEOUT:
|
||||
print "DEBUG: clean root,", self.root.path
|
||||
self.drop[self.root.treeID] = self.root
|
||||
self.root = None
|
||||
changed = True
|
||||
if not self.root or self.root.treeID < self.info.treeID:
|
||||
# No need to drop someone who's worse than us
|
||||
self.info.coords = [self.info.nodeID]
|
||||
self.root = self.info.clone()
|
||||
changed = True
|
||||
elif self.root.treeID == self.info.treeID:
|
||||
self.root = self.info.clone()
|
||||
return changed
|
||||
|
||||
def cleanDropped(self):
|
||||
# May actually be a treeID... better to iterate over keys explicitly
|
||||
nodeIDs = sorted(self.drop.keys())
|
||||
for nodeID in nodeIDs:
|
||||
node = self.drop[nodeID]
|
||||
if self.info.time - node.time > 4*TIMEOUT:
|
||||
del self.drop[nodeID]
|
||||
return None
|
||||
|
||||
def createMessage(self):
|
||||
# Message is just a tuple
|
||||
# First element is the sender
|
||||
# Second element is the root
|
||||
# We will .clone() everything during the send operation
|
||||
msg = (self.info, self.root)
|
||||
return msg
|
||||
|
||||
def sendMessage(self, msg):
|
||||
for link in self.links.values():
|
||||
newMsg = (msg[0].clone(), msg[1].clone())
|
||||
link.msgs.append(newMsg)
|
||||
return None
|
||||
|
||||
def handleMessages(self):
|
||||
changed = False
|
||||
while self.msgs:
|
||||
changed |= self.handleMessage(self.msgs.pop())
|
||||
return changed
|
||||
|
||||
def handleMessage(self, msg):
|
||||
changed = False
|
||||
for node in msg:
|
||||
# Update the path and timestamp for the sender and root info
|
||||
node.path.append(self.info.nodeID)
|
||||
node.time = self.info.time
|
||||
# Update the sender's info in our list of peers
|
||||
sender = msg[0]
|
||||
self.peers[sender.nodeID] = sender
|
||||
# Decide if we want to update the root
|
||||
root = msg[1]
|
||||
updateRoot = False
|
||||
isSameParent = False
|
||||
isBetterParent = False
|
||||
if len(self.root.path) > 1 and len(root.path) > 1:
|
||||
parent = self.peers[self.root.path[-2]]
|
||||
if parent.nodeID == sender.nodeID: isSameParent = True
|
||||
if sender.degree > parent.degree:
|
||||
# This would also be where you check path uptime/reliability/whatever
|
||||
# All else being equal, we prefer parents with high degree
|
||||
# We are trusting peers to report degree correctly in this case
|
||||
# So expect some performance reduction if your peers aren't trustworthy
|
||||
# (Lies can increase average stretch by a few %)
|
||||
isBetterParent = True
|
||||
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
|
||||
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
|
||||
elif not self.root: updateRoot = True
|
||||
elif self.root.treeID < root.treeID: updateRoot = True
|
||||
elif self.root.treeID != root.treeID: pass
|
||||
elif self.root.tstamp > root.tstamp: pass
|
||||
elif len(root.path) < len(self.root.path): updateRoot = True
|
||||
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
|
||||
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
|
||||
if updateRoot:
|
||||
if not self.root or self.root.path != root.path: changed = True
|
||||
self.root = root
|
||||
self.info.coords = self.root.path
|
||||
return changed
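#
# Summary of the update rules above (added for clarity, not in the original):
# a received root is ignored if it would create a loop or was recently
# dropped; otherwise a higher treeID always wins, and for the same root a
# newer timestamp and shorter path are preferred, with parent degree breaking
# ties on equal path length and the current parent allowed to refresh the
# timestamp.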
|
||||
|
||||
def lookup(self, dest):
|
||||
# Note: Can loop in an unconverged network
|
||||
# The person looking up the route is responsible for checking for loops
|
||||
best = None
|
||||
bestDist = 0
|
||||
for node in self.peers.itervalues():
|
||||
# dist = distance to node + dist (on tree) from node to dest
|
||||
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
|
||||
if not best or dist < bestDist:
|
||||
best = node
|
||||
bestDist = dist
|
||||
if best:
|
||||
next = best.path[-2]
|
||||
assert next in self.peers
|
||||
return next
|
||||
else:
|
||||
# We failed to look something up
|
||||
# TODO some way to signal this which doesn't crash
|
||||
assert False
|
||||
|
||||
def initTable(self):
|
||||
# Pre-computes a lookup table for destination coords
|
||||
# Insert parent first so you prefer them as a next-hop
|
||||
self.table.clear()
|
||||
parent = self.info.nodeID
|
||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
||||
for peer in self.peers.itervalues():
|
||||
current = self.table
|
||||
for coord in peer.coords:
|
||||
if coord not in current: current[coord] = (peer.nodeID, dict())
|
||||
old = current[coord]
|
||||
next = old[1]
|
||||
oldPeer = self.peers[old[0]]
|
||||
oldDist = len(oldPeer.coords)
|
||||
oldDeg = oldPeer.degree
|
||||
newDist = len(peer.coords)
|
||||
newDeg = peer.degree
|
||||
# Prefer parent
|
||||
# Else prefer short distance from root
|
||||
# If equal distance, prefer high degree
|
||||
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
|
||||
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
|
||||
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
|
||||
current = next
|
||||
return None
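#
# Illustrative note (not in the original): self.table is a trie keyed by
# successive coordinates; each entry maps a coord to a tuple of
# (next-hop peer nodeID, child dict). A peer P with coords [R, A, P] adds
# entries at the R, A and P levels, so any destination whose coords begin
# with [R, A, ...] can resolve P as a candidate next hop.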
|
||||
|
||||
def lookup_new(self, dest):
|
||||
# Use pre-computed lookup table to look up next hop for dest coords
|
||||
assert self.table
|
||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
||||
else: parent = None
|
||||
current = (parent, self.table)
|
||||
c = None
|
||||
for coord in dest.coords:
|
||||
c = coord
|
||||
if coord not in current[1]: break
|
||||
current = current[1][coord]
|
||||
next = current[0]
|
||||
if c in self.peers: next = c
|
||||
if next not in self.peers:
|
||||
assert next == None
|
||||
# You're the root of a different connected component
|
||||
# You'd drop the packet in this case
|
||||
# To make the path cache not die, need to return a valid next hop...
|
||||
# Returning self for that reason
|
||||
next = self.info.nodeID
|
||||
return next
|
||||
# End class Node
|
||||
|
||||
####################
|
||||
# Helper Functions #
|
||||
####################
|
||||
|
||||
def getIndexOfLCA(source, dest):
|
||||
# Return index of last common ancestor in source/dest coords
|
||||
# -1 if no common ancestor (e.g. different roots)
|
||||
lcaIdx = -1
|
||||
minLen = min(len(source), len(dest))
|
||||
for idx in xrange(minLen):
|
||||
if source[idx] == dest[idx]: lcaIdx = idx
|
||||
else: break
|
||||
return lcaIdx
|
||||
|
||||
def treePath(source, dest):
|
||||
# Return path with source at head and dest at tail
|
||||
lastMatch = getIndexOfLCA(source, dest)
|
||||
path = dest[-1:lastMatch:-1] + source[lastMatch:]
|
||||
assert path[0] == dest[-1]
|
||||
assert path[-1] == source[-1]
|
||||
return path
|
||||
|
||||
def treeDist(source, dest):
|
||||
dist = len(source) + len(dest)
|
||||
lcaIdx = getIndexOfLCA(source, dest)
|
||||
dist -= 2*(lcaIdx+1)
|
||||
return dist
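#
# Worked example (illustrative, not in the original): with
#     source = [1, 2, 5]
#     dest   = [1, 2, 7, 9]
# getIndexOfLCA returns 1 (the last shared coordinate is 2), so
#     treeDist = 3 + 4 - 2*(1+1) = 3
# which matches the up-and-over tree path 5 -> 2 -> 7 -> 9.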
|
||||
|
||||
def dijkstra(nodestore, startingNodeID):
|
||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
||||
nodeIDs = sorted(nodestore.keys())
|
||||
nNodes = len(nodeIDs)
|
||||
idxs = dict()
|
||||
for nodeIdx in xrange(nNodes):
|
||||
nodeID = nodeIDs[nodeIdx]
|
||||
idxs[nodeID] = nodeIdx
|
||||
dists = array.array("H", [0]*nNodes)
|
||||
queue = [(0, startingNodeID)]
|
||||
while queue:
|
||||
dist, nodeID = heapq.heappop(queue)
|
||||
idx = idxs[nodeID]
|
||||
if not dists[idx]: # Unvisited, otherwise we skip it
|
||||
dists[idx] = dist
|
||||
for peer in nodestore[nodeID].links:
|
||||
if not dists[idxs[peer]]:
|
||||
# Peer is also unvisited, so add to queue
|
||||
heapq.heappush(queue, (dist+LINK_COST, peer))
|
||||
return dists
|
||||
|
||||
def dijkstrall(nodestore):
|
||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
||||
nodeIDs = sorted(nodestore.keys())
|
||||
nNodes = len(nodeIDs)
|
||||
idxs = dict()
|
||||
for nodeIdx in xrange(nNodes):
|
||||
nodeID = nodeIDs[nodeIdx]
|
||||
idxs[nodeID] = nodeIdx
|
||||
dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end)
|
||||
for sourceIdx in xrange(nNodes):
|
||||
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
|
||||
queue = [(0, sourceIdx)]
|
||||
while queue:
|
||||
dist, nodeIdx = heapq.heappop(queue)
|
||||
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
|
||||
if not dists[distIdx]: # Unvisited, otherwise we skip it
|
||||
dists[distIdx] = dist
|
||||
for peer in nodestore[nodeIDs[nodeIdx]].links:
|
||||
pIdx = idxs[peer]
|
||||
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
|
||||
if not dists[pdIdx]:
|
||||
# Peer is also unvisited, so add to queue
|
||||
heapq.heappush(queue, (dist+LINK_COST, pIdx))
|
||||
return dists
|
||||
|
||||
def linkNodes(node1, node2):
|
||||
node1.links[node2.info.nodeID] = node2
|
||||
node2.links[node1.info.nodeID] = node1
|
||||
|
||||
############################
|
||||
# Store topology functions #
|
||||
############################
|
||||
|
||||
def makeStoreSquareGrid(sideLength, randomize=True):
|
||||
# Simple grid in a sideLength*sideLength square
|
||||
# Just used to validate that the code runs
|
||||
store = dict()
|
||||
nodeIDs = list(range(sideLength*sideLength))
|
||||
if randomize: random.shuffle(nodeIDs)
|
||||
for nodeID in nodeIDs:
|
||||
store[nodeID] = Node(nodeID)
|
||||
for index in xrange(len(nodeIDs)):
|
||||
if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
|
||||
if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
|
||||
print "Grid store created, size {}".format(len(store))
|
||||
return store
|
||||
|
||||
def makeStoreASRelGraph(pathToGraph):
|
||||
#Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
|
||||
with open(pathToGraph, "r") as f:
|
||||
inData = f.readlines()
|
||||
store = dict()
|
||||
for line in inData:
|
||||
if line.strip()[0] == "#": continue # Skip comment lines
|
||||
line = line.replace('|'," ")
|
||||
nodes = map(int, line.split()[0:2])
|
||||
if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
|
||||
if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
|
||||
linkNodes(store[nodes[0]], store[nodes[1]])
|
||||
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
|
||||
return store
|
||||
|
||||
def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
|
||||
with open(pathToGraph, "r") as f:
|
||||
inData = f.readlines()
|
||||
store = dict()
|
||||
nodeDeg = dict()
|
||||
for line in inData:
|
||||
if line.strip()[0] == "#": continue # Skip comment lines
|
||||
line = line.replace('|'," ")
|
||||
nodes = map(int, line.split()[0:2])
|
||||
if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
|
||||
if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
|
||||
nodeDeg[nodes[0]] += 1
|
||||
nodeDeg[nodes[1]] += 1
|
||||
sortedNodes = sorted(nodeDeg.keys(), \
|
||||
key=lambda x: (nodeDeg[x], x), \
|
||||
reverse=True)
|
||||
maxDegNodeID = sortedNodes[degIdx]
|
||||
return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)
|
||||
|
||||
def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
|
||||
with open(pathToGraph, "r") as f:
|
||||
inData = f.readlines()
|
||||
store = dict()
|
||||
for line in inData:
|
||||
if line.strip()[0] == "#": continue # Skip comment lines
|
||||
line = line.replace('|'," ")
|
||||
nodes = map(int, line.split()[0:2])
|
||||
if nodes[0] not in store:
|
||||
store[nodes[0]] = Node(nodes[0])
|
||||
if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
|
||||
if nodes[1] not in store:
|
||||
store[nodes[1]] = Node(nodes[1])
|
||||
if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
|
||||
linkNodes(store[nodes[0]], store[nodes[1]])
|
||||
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
|
||||
return store
|
||||
|
||||
def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
|
||||
# Read from a DIMES csv-formatted graph from a gzip file
|
||||
store = dict()
|
||||
with gzip.open(pathToGraph, "r") as f:
|
||||
inData = f.readlines()
|
||||
size = len(inData)
|
||||
index = 0
|
||||
for edge in inData:
|
||||
if not index % 1000:
|
||||
pct = 100.0*index/size
|
||||
print "Processing edge {}, {:.2f}%".format(index, pct)
|
||||
index += 1
|
||||
dat = edge.rstrip().split(',')
|
||||
node1 = "N" + str(dat[0].strip())
|
||||
node2 = "N" + str(dat[1].strip())
|
||||
if '?' in node1 or '?' in node2: continue #Unknown node
|
||||
if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
|
||||
if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
|
||||
if node1 not in store: store[node1] = Node(node1)
|
||||
if node2 not in store: store[node2] = Node(node2)
|
||||
if node1 != node2: linkNodes(store[node1], store[node2])
|
||||
print "DIMES graph successfully imported, size {}".format(len(store))
|
||||
return store
|
||||
|
||||
def makeStoreGeneratedGraph(pathToGraph, root=None):
|
||||
with open(pathToGraph, "r") as f:
|
||||
inData = f.readlines()
|
||||
store = dict()
|
||||
for line in inData:
|
||||
if line.strip()[0] == "#": continue # Skip comment lines
|
||||
nodes = map(int, line.strip().split(' ')[0:2])
|
||||
node1 = nodes[0]
|
||||
node2 = nodes[1]
|
||||
if node1 == root: node1 += 1000000
|
||||
if node2 == root: node2 += 1000000
|
||||
if node1 not in store: store[node1] = Node(node1)
|
||||
if node2 not in store: store[node2] = Node(node2)
|
||||
linkNodes(store[node1], store[node2])
|
||||
print "Generated graph successfully imported, size {}".format(len(store))
|
||||
return store
|
||||
|
||||
|
||||
############################################
|
||||
# Functions used as parts of network tests #
|
||||
############################################
|
||||
|
||||
def idleUntilConverged(store):
|
||||
nodeIDs = sorted(store.keys())
|
||||
timeOfLastChange = 0
|
||||
step = 0
|
||||
# Idle until the network has converged
|
||||
while step - timeOfLastChange < 4*TIMEOUT:
|
||||
step += 1
|
||||
print "Step: {}, last change: {}".format(step, timeOfLastChange)
|
||||
changed = False
|
||||
for nodeID in nodeIDs:
|
||||
# Update node status, send messages
|
||||
changed |= store[nodeID].tick()
|
||||
for nodeID in nodeIDs:
|
||||
# Process messages
|
||||
changed |= store[nodeID].handleMessages()
|
||||
if changed: timeOfLastChange = step
|
||||
initTables(store)
|
||||
return store
|
||||
|
||||
def getCacheIndex(nodes, sourceIndex, destIndex):
|
||||
return sourceIndex*nodes + destIndex
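# Example (illustrative): with nodes=4, (sourceIndex=2, destIndex=3) maps to
# slot 2*4 + 3 = 11 in the flattened nodes*nodes array.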
|
||||
|
||||
def initTables(store):
|
||||
nodeIDs = sorted(store.keys())
|
||||
nNodes = len(nodeIDs)
|
||||
print "Initializing routing tables for {} nodes".format(nNodes)
|
||||
for idx in xrange(nNodes):
|
||||
nodeID = nodeIDs[idx]
|
||||
store[nodeID].initTable()
|
||||
print "Routing tables initialized"
|
||||
return None
|
||||
|
||||
def getCache(store):
|
||||
nodeIDs = sorted(store.keys())
|
||||
nNodes = len(nodeIDs)
|
||||
nodeIdxs = dict()
|
||||
for nodeIdx in xrange(nNodes):
|
||||
nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
|
||||
cache = array.array("H", [0]*nNodes*nNodes)
|
||||
for sourceIdx in xrange(nNodes):
|
||||
sourceID = nodeIDs[sourceIdx]
|
||||
print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
|
||||
for destIdx in xrange(nNodes):
|
||||
destID = nodeIDs[destIdx]
|
||||
if sourceID == destID: nextHop = destID # lookup would fail
|
||||
else: nextHop = store[sourceID].lookup(store[destID].info)
|
||||
nextHopIdx = nodeIdxs[nextHop]
|
||||
cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
|
||||
return cache
|
||||
|
||||
def testPaths(store, dists):
|
||||
cache = getCache(store)
|
||||
nodeIDs = sorted(store.keys())
|
||||
nNodes = len(nodeIDs)
|
||||
idxs = dict()
|
||||
for nodeIdx in xrange(nNodes):
|
||||
nodeID = nodeIDs[nodeIdx]
|
||||
idxs[nodeID] = nodeIdx
|
||||
results = dict()
|
||||
for sourceIdx in xrange(nNodes):
|
||||
sourceID = nodeIDs[sourceIdx]
|
||||
print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
|
||||
#dists = dijkstra(store, sourceID)
|
||||
for destIdx in xrange(nNodes):
|
||||
destID = nodeIDs[destIdx]
|
||||
if destID == sourceID: continue # Skip self
|
||||
distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
|
||||
eHops = dists[distIdx]
|
||||
if not eHops: continue # The network is split, no path exists
|
||||
hops = 0
|
||||
for pair in ((sourceIdx, destIdx),):
|
||||
nHops = 0
|
||||
locIdx = pair[0]
|
||||
dIdx = pair[1]
|
||||
while locIdx != dIdx:
|
||||
locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
|
||||
nHops += 1
|
||||
if not hops or nHops < hops: hops = nHops
|
||||
if eHops not in results: results[eHops] = dict()
|
||||
if hops not in results[eHops]: results[eHops][hops] = 0
|
||||
results[eHops][hops] += 1
|
||||
return results
|
||||
|
||||
def getAvgStretch(pathMatrix):
|
||||
avgStretch = 0.
|
||||
checked = 0.
|
||||
for eHops in sorted(pathMatrix.keys()):
|
||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
||||
count = pathMatrix[eHops][nHops]
|
||||
stretch = float(nHops)/float(max(1, eHops))
|
||||
avgStretch += stretch*count
|
||||
checked += count
|
||||
avgStretch /= max(1, checked)
|
||||
return avgStretch
|
||||
|
||||
def getMaxStretch(pathMatrix):
|
||||
maxStretch = 0.
|
||||
for eHops in sorted(pathMatrix.keys()):
|
||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
||||
stretch = float(nHops)/float(max(1, eHops))
|
||||
maxStretch = max(maxStretch, stretch)
|
||||
return maxStretch
|
||||
|
||||
def getCertSizes(store):
|
||||
# Returns nCerts frequency distribution
|
||||
# De-duplicates common certs (for shared prefixes in the path)
|
||||
sizes = dict()
|
||||
for node in store.values():
|
||||
certs = set()
|
||||
for peer in node.peers.values():
|
||||
pCerts = set()
|
||||
assert len(peer.path) == 2
|
||||
assert peer.coords[-1] == peer.path[0]
|
||||
hops = peer.coords + peer.path[1:]
|
||||
for hopIdx in xrange(len(hops)-1):
|
||||
send = hops[hopIdx]
|
||||
if send == node.info.nodeID: continue # We created it, already have it
|
||||
path = hops[0:hopIdx+2]
|
||||
# Each cert is signed by the sender
|
||||
# Includes information about the path from the sender to the next hop
|
||||
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
|
||||
cert = "{}:{}".format(send, path)
|
||||
certs.add(cert)
|
||||
size = len(certs)
|
||||
if size not in sizes: sizes[size] = 0
|
||||
sizes[size] += 1
|
||||
return sizes
|
||||
|
||||
def getMinLinkCertSizes(store):
|
||||
# Returns nCerts frequency distribution
|
||||
# De-duplicates common certs (for shared prefixes in the path)
|
||||
# Based on the minimum number of certs that must be traded through a particular link
|
||||
# Handled per link
|
||||
sizes = dict()
|
||||
for node in store.values():
|
||||
peerCerts = dict()
|
||||
for peer in node.peers.values():
|
||||
pCerts = set()
|
||||
assert len(peer.path) == 2
|
||||
assert peer.coords[-1] == peer.path[0]
|
||||
hops = peer.coords + peer.path[1:]
|
||||
for hopIdx in xrange(len(hops)-1):
|
||||
send = hops[hopIdx]
|
||||
if send == node.info.nodeID: continue # We created it, already have it
|
||||
path = hops[0:hopIdx+2]
|
||||
# Each cert is signed by the sender
|
||||
# Includes information about the path from the sender to the next hop
|
||||
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
|
||||
cert = "{}:{}".format(send, path)
|
||||
pCerts.add(cert)
|
||||
peerCerts[peer.nodeID] = pCerts
|
||||
for peer in peerCerts:
|
||||
size = 0
|
||||
pCerts = peerCerts[peer]
|
||||
for cert in pCerts:
|
||||
required = True
|
||||
for p2 in peerCerts:
|
||||
if p2 == peer: continue
|
||||
p2Certs = peerCerts[p2]
|
||||
if cert in p2Certs: required = False
|
||||
if required: size += 1
|
||||
if size not in sizes: sizes[size] = 0
|
||||
sizes[size] += 1
|
||||
return sizes
|
||||
|
||||
def getPathSizes(store):
|
||||
# Returns frequency distribution of the total number of hops in the routing table
|
||||
# I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
|
||||
sizes = dict()
|
||||
for node in store.values():
|
||||
size = 0
|
||||
for peer in node.peers.values():
|
||||
assert len(peer.path) == 2
|
||||
assert peer.coords[-1] == peer.path[0]
|
||||
peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
|
||||
size += peerSize
|
||||
if size not in sizes: sizes[size] = 0
|
||||
sizes[size] += 1
|
||||
return sizes
|
||||
|
||||
def getPeerSizes(store):
|
||||
# Returns frequency distribution of the number of peers each node has
|
||||
sizes = dict()
|
||||
for node in store.values():
|
||||
nPeers = len(node.peers)
|
||||
if nPeers not in sizes: sizes[nPeers] = 0
|
||||
sizes[nPeers] += 1
|
||||
return sizes
|
||||
|
||||
def getAvgSize(sizes):
|
||||
sumSizes = 0
|
||||
nNodes = 0
|
||||
for size in sizes:
|
||||
count = sizes[size]
|
||||
sumSizes += size*count
|
||||
nNodes += count
|
||||
avgSize = float(sumSizes)/max(1, nNodes)
|
||||
return avgSize
|
||||
|
||||
def getMaxSize(sizes):
|
||||
return max(sizes.keys())
|
||||
|
||||
def getMinSize(sizes):
|
||||
return min(sizes.keys())
|
||||
|
||||
def getResults(pathMatrix):
|
||||
results = []
|
||||
for eHops in sorted(pathMatrix.keys()):
|
||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
||||
count = pathMatrix[eHops][nHops]
|
||||
results.append("{} {} {}".format(eHops, nHops, count))
|
||||
return '\n'.join(results)
|
||||
|
||||
####################################
|
||||
# Functions to run different tests #
|
||||
####################################
|
||||
|
||||
def runTest(store):
|
||||
# Runs the usual set of tests on the store
|
||||
# Does not save results, so only meant for quick tests
|
||||
# To e.g. check the code works, maybe warm up the pypy jit
|
||||
for node in store.values():
|
||||
node.info.time = random.randint(0, TIMEOUT)
|
||||
node.info.tstamp = TIMEOUT
|
||||
print "Begin testing network"
|
||||
dists = None
|
||||
if not dists: dists = dijkstrall(store)
|
||||
idleUntilConverged(store)
|
||||
pathMatrix = testPaths(store, dists)
|
||||
avgStretch = getAvgStretch(pathMatrix)
|
||||
maxStretch = getMaxStretch(pathMatrix)
|
||||
peers = getPeerSizes(store)
|
||||
certs = getCertSizes(store)
|
||||
paths = getPathSizes(store)
|
||||
linkCerts = getMinLinkCertSizes(store)
|
||||
avgPeerSize = getAvgSize(peers)
|
||||
maxPeerSize = getMaxSize(peers)
|
||||
avgCertSize = getAvgSize(certs)
|
||||
maxCertSize = getMaxSize(certs)
|
||||
avgPathSize = getAvgSize(paths)
|
||||
maxPathSize = getMaxSize(paths)
|
||||
avgLinkCert = getAvgSize(linkCerts)
|
||||
maxLinkCert = getMaxSize(linkCerts)
|
||||
totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
|
||||
totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
|
||||
avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
|
||||
print "Finished testing network"
|
||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
||||
print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
|
||||
print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
|
||||
print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
|
||||
print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
|
||||
print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
|
||||
return # End of function
|
||||
|
||||
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc = 1):
|
||||
# Checks performance for every possible choice of root node
|
||||
# Saves output for each root node to a separate file on disk
|
||||
# path = input path to some caida.org formatted AS-relationship graph
|
||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
||||
assert os.path.exists(outDir)
|
||||
store = makeStoreASRelGraph(path)
|
||||
nodes = sorted(store.keys())
|
||||
for nodeIdx in xrange(len(nodes)):
|
||||
if nodeIdx % proc != 0: continue # Work belongs to someone else
|
||||
rootNodeID = nodes[nodeIdx]
|
||||
outpath = outDir+"/{}".format(rootNodeID)
|
||||
if os.path.exists(outpath):
|
||||
print "Skipping {}, already processed".format(rootNodeID)
|
||||
continue
|
||||
store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
|
||||
for node in store.values():
|
||||
node.info.time = random.randint(0, TIMEOUT)
|
||||
node.info.tstamp = TIMEOUT
|
||||
print "Beginning {}, size {}".format(nodeIdx, len(store))
|
||||
if not dists: dists = dijkstrall(store)
|
||||
idleUntilConverged(store)
|
||||
pathMatrix = testPaths(store, dists)
|
||||
avgStretch = getAvgStretch(pathMatrix)
|
||||
maxStretch = getMaxStretch(pathMatrix)
|
||||
results = getResults(pathMatrix)
|
||||
with open(outpath, "w") as f:
|
||||
f.write(results)
|
||||
print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
|
||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
||||
#break # Stop after 1, because they can take forever
|
||||
return # End of function
|
||||
|
||||
def timelineASTest():
|
||||
# Meant to study the performance of the network as a function of network size
|
||||
# Loops over a set of AS-relationship graphs
|
||||
# Runs a test on each graph, selecting highest-degree node as the root
|
||||
# Saves results for each graph to a separate file on disk
|
||||
outDir = "output-treesim-timeline-AS"
|
||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
||||
assert os.path.exists(outDir)
|
||||
paths = sorted(glob.glob("asrel/datasets/*"))
|
||||
for path in paths:
|
||||
date = os.path.basename(path).split(".")[0]
|
||||
outpath = outDir+"/{}".format(date)
|
||||
if os.path.exists(outpath):
|
||||
print "Skipping {}, already processed".format(date)
|
||||
continue
|
||||
store = makeStoreASRelGraphMaxDeg(path)
|
||||
dists = None
|
||||
for node in store.values():
|
||||
node.info.time = random.randint(0, TIMEOUT)
|
||||
node.info.tstamp = TIMEOUT
|
||||
print "Beginning {}, size {}".format(date, len(store))
|
||||
if not dists: dists = dijkstrall(store)
|
||||
idleUntilConverged(store)
|
||||
pathMatrix = testPaths(store, dists)
|
||||
avgStretch = getAvgStretch(pathMatrix)
|
||||
maxStretch = getMaxStretch(pathMatrix)
|
||||
results = getResults(pathMatrix)
|
||||
with open(outpath, "w") as f:
|
||||
f.write(results)
|
||||
print "Finished {} with {} nodes".format(date, len(store))
|
||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
||||
#break # Stop after 1, because they can take forever
|
||||
return # End of function
|
||||
|
||||
def timelineDimesTest():
|
||||
# Meant to study the performance of the network as a function of network size
|
||||
# Loops over a set of AS-relationship graphs
|
||||
# Runs a test on each graph, selecting highest-degree node as the root
|
||||
# Saves results for each graph to a separate file on disk
|
||||
outDir = "output-treesim-timeline-dimes"
|
||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
||||
assert os.path.exists(outDir)
|
||||
# Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
|
||||
paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
|
||||
exists = set(glob.glob(outDir+"/*"))
|
||||
for path in paths:
|
||||
date = os.path.basename(path).split(".")[0]
|
||||
outpath = outDir+"/{}".format(date)
|
||||
if outpath in exists:
|
||||
print "Skipping {}, already processed".format(date)
|
||||
continue
|
||||
store = makeStoreDimesEdges(path)
|
||||
# Get the highest degree node and make it root
|
||||
# Sorted by nodeID just to make it stable in the event of a tie
|
||||
nodeIDs = sorted(store.keys())
|
||||
bestRoot = ""
|
||||
bestDeg = 0
|
||||
for nodeID in nodeIDs:
|
||||
node = store[nodeID]
|
||||
if len(node.links) > bestDeg:
|
||||
bestRoot = nodeID
|
||||
bestDeg = len(node.links)
|
||||
assert bestRoot
|
||||
store = makeStoreDimesEdges(path, bestRoot)
|
||||
rootID = "R" + bestRoot[1:]
|
||||
assert rootID in store
|
||||
# Don't forget to set random seed before setting times
|
||||
# To make results reproducible
|
||||
nodeIDs = sorted(store.keys())
|
||||
random.seed(12345)
|
||||
for nodeID in nodeIDs:
|
||||
node = store[nodeID]
|
||||
node.info.time = random.randint(0, TIMEOUT)
|
||||
node.info.tstamp = TIMEOUT
|
||||
print "Beginning {}, size {}".format(date, len(store))
|
||||
dists = dijkstrall(store)  # dists is not initialized earlier in this function, so compute it unconditionally
|
||||
idleUntilConverged(store)
|
||||
pathMatrix = testPaths(store, dists)
|
||||
avgStretch = getAvgStretch(pathMatrix)
|
||||
maxStretch = getMaxStretch(pathMatrix)
|
||||
results = getResults(pathMatrix)
|
||||
with open(outpath, "w") as f:
|
||||
f.write(results)
|
||||
print "Finished {} with {} nodes".format(date, len(store))
|
||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
||||
break # Stop after 1, because they can take forever
|
||||
return # End of function
|
||||
|
||||
def scalingTest(maxTests=None, inputDir="graphs"):
|
||||
# Meant to study the performance of the network as a function of network size
|
||||
# Loops over a set of nodes in a previously generated graph
|
||||
# Runs a test on each graph, testing each node as the root
|
||||
# if maxTests is set, tests only that number of roots (highest degree first)
|
||||
# Saves results for each graph to a separate file on disk
|
||||
outDir = "output-treesim-{}".format(inputDir)
|
||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
||||
assert os.path.exists(outDir)
|
||||
paths = sorted(glob.glob("{}/*".format(inputDir)))
|
||||
exists = set(glob.glob(outDir+"/*"))
|
||||
for path in paths:
|
||||
gc.collect() # pypy waits for gc to close files
|
||||
graph = os.path.basename(path).split(".")[0]
|
||||
store = makeStoreGeneratedGraph(path)
|
||||
# Get the highest degree node and make it root
|
||||
# Sorted by nodeID just to make it stable in the event of a tie
|
||||
nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
|
||||
dists = None
|
||||
if maxTests: nodeIDs = nodeIDs[:maxTests]
|
||||
for nodeID in nodeIDs:
|
||||
nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
|
||||
outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
|
||||
if outpath in exists:
|
||||
print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
|
||||
continue
|
||||
store = makeStoreGeneratedGraph(path, nodeID)
|
||||
# Don't forget to set random seed before setting times
|
||||
random.seed(12345) # To make results reproducible
|
||||
nIDs = sorted(store.keys())
|
||||
for nID in nIDs:
|
||||
node = store[nID]
|
||||
node.info.time = random.randint(0, TIMEOUT)
|
||||
node.info.tstamp = TIMEOUT
|
||||
print "Beginning {}, size {}".format(graph, len(store))
|
||||
if not dists: dists = dijkstrall(store)
|
||||
idleUntilConverged(store)
|
||||
pathMatrix = testPaths(store, dists)
|
||||
avgStretch = getAvgStretch(pathMatrix)
|
||||
maxStretch = getMaxStretch(pathMatrix)
|
||||
results = getResults(pathMatrix)
|
||||
with open(outpath, "w") as f:
|
||||
f.write(results)
|
||||
print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
|
||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
||||
return # End of function
|
||||
|
||||
##################
|
||||
# Main Execution #
|
||||
##################
|
||||
|
||||
if __name__ == "__main__":
|
||||
if True: # Run a quick test
|
||||
random.seed(12345) # DEBUG
|
||||
store = makeStoreSquareGrid(4)
|
||||
runTest(store) # Quick test
|
||||
store = None
|
||||
# Do some real work
|
||||
#runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
|
||||
#timelineDimesTest()
|
||||
#rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
|
||||
#timelineASTest()
|
||||
#rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
|
||||
#scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
|
||||
#store = makeStoreGeneratedGraph("bgp_tables")
|
||||
#store = makeStoreGeneratedGraph("skitter")
|
||||
#store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
|
||||
#store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
|
||||
if store: runTest(store)
|
||||
#rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
|
||||
#scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
|
||||
#scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
|
||||
#scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
|
||||
#scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
|
||||
if not store:
|
||||
import sys
|
||||
args = sys.argv
|
||||
if len(args) == 2:
|
||||
job_number = int(sys.argv[1])
|
||||
rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
|
||||
else:
|
||||
print "Usage: {} job_number".format(args[0])
|
||||
print "job_number = which job set to run on this node (1-indexed)"
|
||||
|
|
@@ -1,455 +0,0 @@
|
|||
package main
|
||||
|
||||
import "fmt"
|
||||
import "bufio"
|
||||
import "os"
|
||||
import "strings"
|
||||
import "strconv"
|
||||
import "time"
|
||||
|
||||
import "runtime"
|
||||
import "runtime/pprof"
|
||||
import "flag"
|
||||
|
||||
import "github.com/gologme/log"
|
||||
|
||||
import . "github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
import . "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type Node struct {
|
||||
index int
|
||||
core Core
|
||||
send chan<- []byte
|
||||
recv <-chan []byte
|
||||
}
|
||||
|
||||
func (n *Node) init(index int) {
|
||||
n.index = index
|
||||
n.core.Init()
|
||||
n.send = n.core.DEBUG_getSend()
|
||||
n.recv = n.core.DEBUG_getRecv()
|
||||
n.core.DEBUG_simFixMTU()
|
||||
}
|
||||
|
||||
func (n *Node) printTraffic() {
|
||||
for {
|
||||
packet := <-n.recv
|
||||
fmt.Println(n.index, packet)
|
||||
//panic("Got a packet")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) startPeers() {
|
||||
//for _, p := range n.core.Peers.Ports {
|
||||
// go p.MainLoop()
|
||||
//}
|
||||
//go n.printTraffic()
|
||||
//n.core.Peers.DEBUG_startPeers()
|
||||
}
|
||||
|
||||
func linkNodes(m, n *Node) {
|
||||
// Don't allow duplicates
|
||||
if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigningPublicKey()) {
|
||||
return
|
||||
}
|
||||
// Create peers
|
||||
// Buffering reduces packet loss in the sim
|
||||
// This slightly speeds up testing (fewer delays before retrying a ping)
|
||||
pLinkPub, pLinkPriv := m.core.DEBUG_newBoxKeys()
|
||||
qLinkPub, qLinkPriv := m.core.DEBUG_newBoxKeys()
|
||||
p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getEncryptionPublicKey(),
|
||||
n.core.DEBUG_getSigningPublicKey(), *m.core.DEBUG_getSharedKey(pLinkPriv, qLinkPub))
|
||||
q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getEncryptionPublicKey(),
|
||||
m.core.DEBUG_getSigningPublicKey(), *n.core.DEBUG_getSharedKey(qLinkPriv, pLinkPub))
|
||||
DEBUG_simLinkPeers(p, q)
|
||||
return
|
||||
}
|
||||
|
||||
func makeStoreSquareGrid(sideLength int) map[int]*Node {
|
||||
store := make(map[int]*Node)
|
||||
nNodes := sideLength * sideLength
|
||||
idxs := make([]int, 0, nNodes)
|
||||
// TODO shuffle nodeIDs
|
||||
for idx := 1; idx <= nNodes; idx++ {
|
||||
idxs = append(idxs, idx)
|
||||
}
|
||||
for _, idx := range idxs {
|
||||
node := &Node{}
|
||||
node.init(idx)
|
||||
store[idx] = node
|
||||
}
|
||||
for idx := 0; idx < nNodes; idx++ {
|
||||
if (idx % sideLength) != 0 {
|
||||
linkNodes(store[idxs[idx]], store[idxs[idx-1]])
|
||||
}
|
||||
if idx >= sideLength {
|
||||
linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
|
||||
}
|
||||
}
|
||||
//for _, node := range store { node.initPorts() }
|
||||
return store
|
||||
}
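// Illustrative note (not part of the original file): for sideLength = 3 the
// indices 1..9 form the grid
//
//	1 2 3
//	4 5 6
//	7 8 9
//
// and the loop above links each node to its left neighbour (unless it starts
// a row) and to the node directly above it (unless it is in the first row).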
|
||||
|
||||
func makeStoreStar(nNodes int) map[int]*Node {
|
||||
store := make(map[int]*Node)
|
||||
center := &Node{}
|
||||
center.init(0)
|
||||
store[0] = center
|
||||
for idx := 1; idx < nNodes; idx++ {
|
||||
node := &Node{}
|
||||
node.init(idx)
|
||||
store[idx] = node
|
||||
linkNodes(center, node)
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
func loadGraph(path string) map[int]*Node {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer f.Close()
|
||||
store := make(map[int]*Node)
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
nodeIdxstrs := strings.Split(line, " ")
|
||||
nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0])
|
||||
nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1])
|
||||
if store[nodeIdx0] == nil {
|
||||
node := &Node{}
|
||||
node.init(nodeIdx0)
|
||||
store[nodeIdx0] = node
|
||||
}
|
||||
if store[nodeIdx1] == nil {
|
||||
node := &Node{}
|
||||
node.init(nodeIdx1)
|
||||
store[nodeIdx1] = node
|
||||
}
|
||||
linkNodes(store[nodeIdx0], store[nodeIdx1])
|
||||
}
|
||||
//for _, node := range store { node.initPorts() }
|
||||
return store
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func startNetwork(store map[[32]byte]*Node) {
|
||||
for _, node := range store {
|
||||
node.startPeers()
|
||||
}
|
||||
}
|
||||
|
||||
func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
|
||||
newStore := make(map[[32]byte]*Node)
|
||||
for _, node := range store {
|
||||
newStore[node.core.DEBUG_getSigningPublicKey()] = node
|
||||
}
|
||||
return newStore
|
||||
}
|
||||
|
||||
func testPaths(store map[[32]byte]*Node) bool {
|
||||
nNodes := len(store)
|
||||
count := 0
|
||||
for _, source := range store {
|
||||
count++
|
||||
fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index)
|
||||
for _, dest := range store {
|
||||
//if source == dest { continue }
|
||||
destLoc := dest.core.DEBUG_getLocator()
|
||||
coords := destLoc.DEBUG_getCoords()
|
||||
temp := 0
|
||||
ttl := ^uint64(0)
|
||||
oldTTL := ttl
|
||||
for here := source; here != dest; {
|
||||
temp++
|
||||
if temp > 4096 {
|
||||
fmt.Println("Loop?")
|
||||
time.Sleep(time.Second)
|
||||
return false
|
||||
}
|
||||
nextPort := here.core.DEBUG_switchLookup(coords)
|
||||
// First check if "here" is accepting packets from the previous node
|
||||
// TODO explain how this works
|
||||
ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
|
||||
nextPeer := ports[nextPort]
|
||||
if nextPeer == nil {
|
||||
fmt.Println("Peer associated with next port is nil")
|
||||
return false
|
||||
}
|
||||
next := store[nextPeer.DEBUG_getSigKey()]
|
||||
/*
|
||||
if next == here {
|
||||
//for idx, link := range here.links {
|
||||
// fmt.Println("DUMP:", idx, link.nodeID)
|
||||
//}
|
||||
if nextPort != 0 { panic("This should not be") }
|
||||
fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl)
|
||||
//here.table.DEBUG_dumpTable()
|
||||
//fmt.Println("Ports:", here.nodeID, here.ports)
|
||||
return false
|
||||
panic(fmt.Sprintln("Routing Loop:",
|
||||
source.index,
|
||||
here.index,
|
||||
dest.index))
|
||||
}
|
||||
*/
|
||||
if temp > 4090 {
|
||||
fmt.Println("DEBUG:",
|
||||
source.index, source.core.DEBUG_getLocator(),
|
||||
here.index, here.core.DEBUG_getLocator(),
|
||||
dest.index, dest.core.DEBUG_getLocator())
|
||||
//here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
|
||||
}
|
||||
if here != source {
|
||||
// This is sufficient to check for routing loops or blackholes
|
||||
//break
|
||||
}
|
||||
if here == next {
|
||||
fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
|
||||
return false
|
||||
}
|
||||
here = next
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func stressTest(store map[[32]byte]*Node) {
|
||||
fmt.Println("Stress testing network...")
|
||||
nNodes := len(store)
|
||||
dests := make([][]byte, 0, nNodes)
|
||||
for _, dest := range store {
|
||||
loc := dest.core.DEBUG_getLocator()
|
||||
coords := loc.DEBUG_getCoords()
|
||||
dests = append(dests, coords)
|
||||
}
|
||||
lookups := 0
|
||||
start := time.Now()
|
||||
for _, source := range store {
|
||||
for _, coords := range dests {
|
||||
source.core.DEBUG_switchLookup(coords)
|
||||
lookups++
|
||||
}
|
||||
}
|
||||
timed := time.Since(start)
|
||||
fmt.Printf("%d lookups in %s (%f lookups per second)\n",
|
||||
lookups,
|
||||
timed,
|
||||
float64(lookups)/timed.Seconds())
|
||||
}
|
||||
|
||||
func pingNodes(store map[[32]byte]*Node) {
|
||||
fmt.Println("Sending pings...")
|
||||
nNodes := len(store)
|
||||
count := 0
|
||||
equiv := func(a []byte, b []byte) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for idx := 0; idx < len(a); idx++ {
|
||||
if a[idx] != b[idx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
for _, source := range store {
|
||||
count++
|
||||
//if count > 16 { break }
|
||||
fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
|
||||
sourceKey := source.core.DEBUG_getEncryptionPublicKey()
|
||||
payload := sourceKey[:]
|
||||
sourceAddr := source.core.DEBUG_getAddr()[:]
|
||||
sendTo := func(bs []byte, destAddr []byte) {
|
||||
packet := make([]byte, 40+len(bs))
|
||||
copy(packet[8:24], sourceAddr)
|
||||
copy(packet[24:40], destAddr)
|
||||
copy(packet[40:], bs)
|
||||
packet[0] = 6 << 4
|
||||
source.send <- packet
|
||||
}
|
||||
destCount := 0
|
||||
for _, dest := range store {
|
||||
destCount += 1
|
||||
fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount)
|
||||
if dest == source {
|
||||
fmt.Println("Skipping self")
|
||||
continue
|
||||
}
|
||||
destAddr := dest.core.DEBUG_getAddr()[:]
|
||||
ticker := time.NewTicker(150 * time.Millisecond)
|
||||
sendTo(payload, destAddr)
|
||||
for loop := true; loop; {
|
||||
select {
|
||||
case packet := <-dest.recv:
|
||||
{
|
||||
if equiv(payload, packet[len(packet)-len(payload):]) {
|
||||
loop = false
|
||||
}
|
||||
}
|
||||
case <-ticker.C:
|
||||
sendTo(payload, destAddr)
|
||||
//dumpDHTSize(store) // note that this uses racey functions to read things...
|
||||
}
|
||||
}
|
||||
ticker.Stop()
|
||||
}
|
||||
//break // Only try sending pings from 1 node
|
||||
// This is because, for some reason, stopTun() doesn't always close it
|
||||
// And if two tuns are up, bad things happen (sends via wrong interface)
|
||||
}
|
||||
fmt.Println("Finished pinging nodes")
|
||||
}
|
||||
|
||||
func pingBench(store map[[32]byte]*Node) {
|
||||
fmt.Println("Benchmarking pings...")
|
||||
nPings := 0
|
||||
payload := make([]byte, 1280+40) // MTU + ipv6 header
|
||||
var timed time.Duration
|
||||
//nNodes := len(store)
|
||||
count := 0
|
||||
for _, source := range store {
|
||||
count++
|
||||
//fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
|
||||
getPing := func(key [32]byte, decodedCoords []byte) []byte {
|
||||
// TODO write some function to do this the right way, put... somewhere...
|
||||
coords := DEBUG_wire_encode_coords(decodedCoords)
|
||||
packet := make([]byte, 0, len(key)+len(coords)+len(payload))
|
||||
packet = append(packet, key[:]...)
|
||||
packet = append(packet, coords...)
|
||||
packet = append(packet, payload[:]...)
|
||||
return packet
|
||||
}
|
||||
for _, dest := range store {
|
||||
key := dest.core.DEBUG_getEncryptionPublicKey()
|
||||
loc := dest.core.DEBUG_getLocator()
|
||||
coords := loc.DEBUG_getCoords()
|
||||
ping := getPing(key, coords)
|
||||
// TODO make sure the session is open first
|
||||
start := time.Now()
|
||||
for i := 0; i < 1000000; i++ {
|
||||
source.send <- ping
|
||||
nPings++
|
||||
}
|
||||
timed += time.Since(start)
|
||||
break
|
||||
}
|
||||
break
|
||||
}
|
||||
fmt.Printf("Sent %d pings in %s (%f per second)\n",
|
||||
nPings,
|
||||
timed,
|
||||
float64(nPings)/timed.Seconds())
|
||||
}
|
||||
|
||||
func dumpStore(store map[NodeID]*Node) {
|
||||
for _, node := range store {
|
||||
fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator())
|
||||
node.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
|
||||
}
|
||||
}
|
||||
|
||||
func dumpDHTSize(store map[[32]byte]*Node) {
|
||||
var min, max, sum int
|
||||
for _, node := range store {
|
||||
num := node.core.DEBUG_getDHTSize()
|
||||
min = num
|
||||
max = num
|
||||
break
|
||||
}
|
||||
for _, node := range store {
|
||||
num := node.core.DEBUG_getDHTSize()
|
||||
if num < min {
|
||||
min = num
|
||||
}
|
||||
if num > max {
|
||||
max = num
|
||||
}
|
||||
sum += num
|
||||
}
|
||||
avg := float64(sum) / float64(len(store))
|
||||
fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
|
||||
}
|
||||
|
||||
func (n *Node) startTCP(listen string) {
|
||||
n.core.DEBUG_setupAndStartGlobalTCPInterface(listen)
|
||||
}
|
||||
|
||||
func (n *Node) connectTCP(remoteAddr string) {
|
||||
n.core.AddPeer(remoteAddr, remoteAddr)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
|
||||
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *cpuprofile != "" {
|
||||
f, err := os.Create(*cpuprofile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("could not create CPU profile: ", err))
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
panic(fmt.Sprintf("could not start CPU profile: ", err))
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
if *memprofile != "" {
|
||||
f, err := os.Create(*memprofile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("could not create memory profile: ", err))
|
||||
}
|
||||
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
|
||||
}
|
||||
fmt.Println("Test")
|
||||
Util_testAddrIDMask()
|
||||
idxstore := makeStoreSquareGrid(4)
|
||||
//idxstore := makeStoreStar(256)
|
||||
//idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
|
||||
//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
|
||||
//idxstore := loadGraph("skitter")
|
||||
kstore := getKeyedStore(idxstore)
|
||||
//*
|
||||
logger := log.New(os.Stderr, "", log.Flags())
|
||||
for _, n := range kstore {
|
||||
n.core.DEBUG_setLogger(logger)
|
||||
}
|
||||
//*/
|
||||
startNetwork(kstore)
|
||||
//time.Sleep(10*time.Second)
|
||||
// Note that testPaths only works if pressure is turned off
|
||||
// Otherwise congestion can lead to routing loops?
|
||||
for finished := false; !finished; {
|
||||
finished = testPaths(kstore)
|
||||
}
|
||||
pingNodes(kstore)
|
||||
//pingBench(kstore) // Only after disabling debug output
|
||||
//stressTest(kstore)
|
||||
//time.Sleep(120 * time.Second)
|
||||
dumpDHTSize(kstore) // note that this uses racey functions to read things...
|
||||
if false {
|
||||
// This connects the sim to the local network
|
||||
for _, node := range kstore {
|
||||
node.startTCP("localhost:0")
|
||||
node.connectTCP("localhost:12345")
|
||||
break // just 1
|
||||
}
|
||||
for _, node := range kstore {
|
||||
go func(node *Node) { // pass node explicitly so each goroutine drains its own channel
|
||||
// Just dump any packets sent to this node
|
||||
for range node.recv {
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
var block chan struct{}
|
||||
<-block
|
||||
}
|
||||
runtime.GC()
|
||||
}
|
|
@@ -3,9 +3,7 @@
|
|||
package address
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"crypto/ed25519"
|
||||
)
|
||||
|
||||
// Address represents an IPv6 address in the yggdrasil address range.
|
||||
|
@@ -45,25 +43,34 @@ func (s *Subnet) IsValid() bool {
|
|||
return (*s)[l-1] == prefix[l-1]|0x01
|
||||
}
|
||||
|
||||
// AddrForNodeID takes a *NodeID as an argument and returns an *Address.
|
||||
// AddrForKey takes an ed25519.PublicKey as an argument and returns an *Address.
|
||||
// This function returns nil if the key length is not ed25519.PublicKeySize.
|
||||
// This address begins with the contents of GetPrefix(), with the last bit set to 0 to indicate an address.
|
||||
// The following 8 bits are set to the number of leading 1 bits in the NodeID.
|
||||
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the address.
|
||||
func AddrForNodeID(nid *crypto.NodeID) *Address {
|
||||
// The following 8 bits are set to the number of leading 1 bits in the bitwise inverse of the public key.
|
||||
// The bitwise inverse of the key, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the address.
|
||||
func AddrForKey(publicKey ed25519.PublicKey) *Address {
|
||||
// 128 bit address
|
||||
// Begins with prefix
|
||||
// Next bit is a 0
|
||||
// Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID
|
||||
// Leading 1s and first leading 0 of the NodeID are truncated off
|
||||
// The rest is appended to the IPv6 address (truncated to 128 bits total)
|
||||
if len(publicKey) != ed25519.PublicKeySize {
|
||||
return nil
|
||||
}
|
||||
var buf [ed25519.PublicKeySize]byte
|
||||
copy(buf[:], publicKey)
|
||||
for idx := range buf {
|
||||
buf[idx] = ^buf[idx]
|
||||
}
|
||||
var addr Address
|
||||
var temp []byte
|
||||
done := false
|
||||
ones := byte(0)
|
||||
bits := byte(0)
|
||||
nBits := 0
|
||||
for idx := 0; idx < 8*len(nid); idx++ {
|
||||
bit := (nid[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
|
||||
for idx := 0; idx < 8*len(buf); idx++ {
|
||||
bit := (buf[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
|
||||
if !done && bit != 0 {
|
||||
ones++
|
||||
continue
|
||||
|
@@ -86,15 +93,19 @@ func AddrForNodeID(nid *crypto.NodeID) *Address {
|
|||
return &addr
|
||||
}
|
||||
|
||||
// SubnetForNodeID takes a *NodeID as an argument and returns an *Address.
|
||||
// This subnet begins with the address prefix, with the last bit set to 1 to indicate a prefix.
|
||||
// The following 8 bits are set to the number of leading 1 bits in the NodeID.
|
||||
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the subnet.
|
||||
func SubnetForNodeID(nid *crypto.NodeID) *Subnet {
|
||||
// SubnetForKey takes an ed25519.PublicKey as an argument and returns a *Subnet.
|
||||
// This function returns nil if the key length is not ed25519.PublicKeySize.
|
||||
// The subnet begins with the address prefix, with the last bit set to 1 to indicate a prefix.
|
||||
// The following 8 bits are set to the number of leading 1 bits in the bitwise inverse of the key.
|
||||
// The bitwise inverse of the key, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the subnet.
|
||||
func SubnetForKey(publicKey ed25519.PublicKey) *Subnet {
|
||||
// Exactly as the address version, with two exceptions:
|
||||
// 1) The first bit after the fixed prefix is a 1 instead of a 0
|
||||
// 2) It's truncated to a subnet prefix length instead of 128 bits
|
||||
addr := *AddrForNodeID(nid)
|
||||
addr := AddrForKey(publicKey)
|
||||
if addr == nil {
|
||||
return nil
|
||||
}
|
||||
var snet Subnet
|
||||
copy(snet[:], addr[:])
|
||||
prefix := GetPrefix()
|
||||
|
@@ -102,75 +113,34 @@ func SubnetForNodeID(nid *crypto.NodeID) *Subnet {
|
|||
return &snet
|
||||
}
|
||||
|
||||
// GetNodeIDandMask returns two *NodeID.
|
||||
// The first is a NodeID with all the bits known from the Address set to their correct values.
|
||||
// The second is a bitmask with 1 bit set for each bit that was known from the Address.
|
||||
// This is used to look up NodeIDs in the DHT and tell if they match an Address.
|
||||
func (a *Address) GetNodeIDandMask() (*crypto.NodeID, *crypto.NodeID) {
|
||||
// Mask is a bitmask to mark the bits visible from the address
|
||||
// This means truncated leading 1s, first leading 0, and visible part of addr
|
||||
var nid crypto.NodeID
|
||||
var mask crypto.NodeID
|
||||
// GetKey returns the partial ed25519.PublicKey for the Address.
|
||||
// This is used for key lookup.
|
||||
func (a *Address) GetKey() ed25519.PublicKey {
|
||||
var key [ed25519.PublicKeySize]byte
|
||||
prefix := GetPrefix()
|
||||
ones := int(a[len(prefix)])
|
||||
for idx := 0; idx < ones; idx++ {
|
||||
nid[idx/8] |= 0x80 >> byte(idx%8)
|
||||
key[idx/8] |= 0x80 >> byte(idx%8)
|
||||
}
|
||||
nidOffset := ones + 1
|
||||
keyOffset := ones + 1
|
||||
addrOffset := 8*len(prefix) + 8
|
||||
for idx := addrOffset; idx < 8*len(a); idx++ {
|
||||
bits := a[idx/8] & (0x80 >> byte(idx%8))
|
||||
bits <<= byte(idx % 8)
|
||||
nidIdx := nidOffset + (idx - addrOffset)
|
||||
bits >>= byte(nidIdx % 8)
|
||||
nid[nidIdx/8] |= bits
|
||||
keyIdx := keyOffset + (idx - addrOffset)
|
||||
bits >>= byte(keyIdx % 8)
|
||||
key[keyIdx/8] |= bits
|
||||
}
|
||||
maxMask := 8*(len(a)-len(prefix)-1) + ones + 1
|
||||
for idx := 0; idx < maxMask; idx++ {
|
||||
mask[idx/8] |= 0x80 >> byte(idx%8)
|
||||
for idx := range key {
|
||||
key[idx] = ^key[idx]
|
||||
}
|
||||
return &nid, &mask
|
||||
return ed25519.PublicKey(key[:])
|
||||
}
|
||||
|
||||
// GetNodeIDLengthString returns a string representation of the known bits of the NodeID, along with the number of known bits, for use with yggdrasil.Dialer's Dial and DialContext functions.
|
||||
func (a *Address) GetNodeIDLengthString() string {
|
||||
nid, mask := a.GetNodeIDandMask()
|
||||
l := mask.PrefixLength()
|
||||
return fmt.Sprintf("%s/%d", nid.String(), l)
|
||||
}
|
||||
|
||||
// GetNodeIDandMask returns two *NodeID.
|
||||
// The first is a NodeID with all the bits known from the Subnet set to their correct values.
|
||||
// The second is a bitmask with 1 bit set for each bit that was known from the Subnet.
|
||||
// This is used to look up NodeIDs in the DHT and tell if they match a Subnet.
|
||||
func (s *Subnet) GetNodeIDandMask() (*crypto.NodeID, *crypto.NodeID) {
|
||||
// As with the address version, but visible parts of the subnet prefix instead
|
||||
var nid crypto.NodeID
|
||||
var mask crypto.NodeID
|
||||
prefix := GetPrefix()
|
||||
ones := int(s[len(prefix)])
|
||||
for idx := 0; idx < ones; idx++ {
|
||||
nid[idx/8] |= 0x80 >> byte(idx%8)
|
||||
}
|
||||
nidOffset := ones + 1
|
||||
addrOffset := 8*len(prefix) + 8
|
||||
for idx := addrOffset; idx < 8*len(s); idx++ {
|
||||
bits := s[idx/8] & (0x80 >> byte(idx%8))
|
||||
bits <<= byte(idx % 8)
|
||||
nidIdx := nidOffset + (idx - addrOffset)
|
||||
bits >>= byte(nidIdx % 8)
|
||||
nid[nidIdx/8] |= bits
|
||||
}
|
||||
maxMask := 8*(len(s)-len(prefix)-1) + ones + 1
|
||||
for idx := 0; idx < maxMask; idx++ {
|
||||
mask[idx/8] |= 0x80 >> byte(idx%8)
|
||||
}
|
||||
return &nid, &mask
|
||||
}
|
||||
|
||||
// GetNodeIDLengthString returns a string representation of the known bits of the NodeID, along with the number of known bits, for use with yggdrasil.Dialer's Dial and DialContext functions.
|
||||
func (s *Subnet) GetNodeIDLengthString() string {
|
||||
nid, mask := s.GetNodeIDandMask()
|
||||
l := mask.PrefixLength()
|
||||
return fmt.Sprintf("%s/%d", nid.String(), l)
|
||||
// GetKey returns the partial ed25519.PublicKey for the Subnet.
|
||||
// This is used for key lookup.
|
||||
func (s *Subnet) GetKey() ed25519.PublicKey {
|
||||
var addr Address
|
||||
copy(addr[:], s[:])
|
||||
return addr.GetKey()
|
||||
}
|
||||
|
|
|
@ -1,48 +1,57 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gologme/log"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
)
|
||||
|
||||
// TODO: Add authentication
|
||||
|
||||
type AdminSocket struct {
|
||||
core *yggdrasil.Core
|
||||
core *core.Core
|
||||
log *log.Logger
|
||||
listenaddr string
|
||||
listener net.Listener
|
||||
handlers map[string]handler
|
||||
started bool
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// Info refers to information that is returned to the admin socket handler.
|
||||
type Info map[string]interface{}
|
||||
type AdminSocketResponse struct {
|
||||
Status string `json:"status"`
|
||||
Request struct {
|
||||
Name string `json:"request"`
|
||||
KeepAlive bool `json:"keepalive"`
|
||||
} `json:"request"`
|
||||
Response interface{} `json:"response"`
|
||||
}
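To make the wire format concrete, here is a hedged sketch of a client round-trip against the admin socket. The TCP address is only the commonly used default mentioned in the configuration comments (AdminListen), not something introduced by this change, and the request shape mirrors the embedded Request struct above.

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	// AdminListen is configurable; tcp://localhost:9001 is only the common
	// default assumed here for the sketch.
	conn, err := net.Dial("tcp", "localhost:9001")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// The request shape mirrors the embedded Request struct above.
	req := map[string]interface{}{"request": "getSelf", "keepalive": false}
	if err := json.NewEncoder(conn).Encode(req); err != nil {
		panic(err)
	}
	var resp map[string]interface{}
	if err := json.NewDecoder(conn).Decode(&resp); err != nil {
		panic(err)
	}
	fmt.Println(resp["status"], resp["response"])
}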
|
||||
|
||||
type handler struct {
|
||||
args []string // List of human-readable argument names
|
||||
handler func(Info) (Info, error) // First is input map, second is output
|
||||
handler func(json.RawMessage) (interface{}, error) // First is input map, second is output
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
List map[string]ListEntry `json:"list"`
|
||||
}
|
||||
|
||||
type ListEntry struct {
|
||||
Fields []string `json:"fields"`
|
||||
}
|
||||
|
||||
// AddHandler is called for each admin function to add the handler and help documentation to the API.
|
||||
func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(Info) (Info, error)) error {
|
||||
func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(json.RawMessage) (interface{}, error)) error {
|
||||
if _, ok := a.handlers[strings.ToLower(name)]; ok {
|
||||
return errors.New("handler already exists")
|
||||
}
|
||||
|
@ -53,289 +62,119 @@ func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(In
|
|||
return nil
|
||||
}
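For callers migrating to the new signature, a handler now receives the raw request JSON and unmarshals its own request type. A hypothetical registration (the "echo" action and its request struct are illustrative only, not part of this commit, and the sketch assumes it lives alongside the admin package so that AdminSocket and encoding/json are already imported) might look like:

// Hypothetical request type for an "echo" action.
type echoRequest struct {
	Message string `json:"message"`
}

func registerEcho(a *AdminSocket) {
	_ = a.AddHandler("echo", []string{"message"}, func(in json.RawMessage) (interface{}, error) {
		var req echoRequest
		if err := json.Unmarshal(in, &req); err != nil {
			return nil, err
		}
		// The returned value is wrapped in AdminSocketResponse by handleRequest.
		return map[string]string{"echo": req.Message}, nil
	})
}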
|
||||
|
||||
// init runs the initial admin setup.
|
||||
func (a *AdminSocket) Init(c *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
|
||||
// Init runs the initial admin setup.
|
||||
func (a *AdminSocket) Init(c *core.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
|
||||
a.core = c
|
||||
a.log = log
|
||||
a.handlers = make(map[string]handler)
|
||||
current := state.GetCurrent()
|
||||
a.listenaddr = current.AdminListen
|
||||
a.AddHandler("list", []string{}, func(in Info) (Info, error) {
|
||||
handlers := make(map[string]interface{})
|
||||
for handlername, handler := range a.handlers {
|
||||
handlers[handlername] = Info{"fields": handler.args}
|
||||
a.done = make(chan struct{})
|
||||
close(a.done) // Start in a done / not-started state
|
||||
_ = a.AddHandler("list", []string{}, func(_ json.RawMessage) (interface{}, error) {
|
||||
res := &ListResponse{
|
||||
List: map[string]ListEntry{},
|
||||
}
|
||||
return Info{"list": handlers}, nil
|
||||
for name, handler := range a.handlers {
|
||||
res.List[name] = ListEntry{
|
||||
Fields: handler.args,
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AdminSocket) UpdateConfig(config *config.NodeConfig) {
|
||||
a.log.Debugln("Reloading admin configuration...")
|
||||
if a.listenaddr != config.AdminListen {
|
||||
a.listenaddr = config.AdminListen
|
||||
if a.IsStarted() {
|
||||
a.Stop()
|
||||
}
|
||||
a.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AdminSocket) SetupAdminHandlers(na *AdminSocket) {
|
||||
a.AddHandler("getSelf", []string{}, func(in Info) (Info, error) {
|
||||
ip := a.core.Address().String()
|
||||
subnet := a.core.Subnet()
|
||||
return Info{
|
||||
"self": Info{
|
||||
ip: Info{
|
||||
"box_pub_key": a.core.EncryptionPublicKey(),
|
||||
"build_name": version.BuildName(),
|
||||
"build_version": version.BuildVersion(),
|
||||
"coords": fmt.Sprintf("%v", a.core.Coords()),
|
||||
"subnet": subnet.String(),
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
_ = a.AddHandler("getSelf", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetSelfRequest{}
|
||||
res := &GetSelfResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.getSelfHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
a.AddHandler("getPeers", []string{}, func(in Info) (Info, error) {
|
||||
peers := make(Info)
|
||||
for _, p := range a.core.GetPeers() {
|
||||
addr := *address.AddrForNodeID(crypto.GetNodeID(&p.PublicKey))
|
||||
so := net.IP(addr[:]).String()
|
||||
peers[so] = Info{
|
||||
"port": p.Port,
|
||||
"uptime": p.Uptime.Seconds(),
|
||||
"bytes_sent": p.BytesSent,
|
||||
"bytes_recvd": p.BytesRecvd,
|
||||
"proto": p.Protocol,
|
||||
"endpoint": p.Endpoint,
|
||||
"box_pub_key": hex.EncodeToString(p.PublicKey[:]),
|
||||
_ = a.AddHandler("getPeers", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetPeersRequest{}
|
||||
res := &GetPeersResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.getPeersHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Info{"peers": peers}, nil
|
||||
return res, nil
|
||||
})
|
||||
a.AddHandler("getSwitchPeers", []string{}, func(in Info) (Info, error) {
|
||||
switchpeers := make(Info)
|
||||
for _, s := range a.core.GetSwitchPeers() {
|
||||
addr := *address.AddrForNodeID(crypto.GetNodeID(&s.PublicKey))
|
||||
so := fmt.Sprint(s.Port)
|
||||
switchpeers[so] = Info{
|
||||
"ip": net.IP(addr[:]).String(),
|
||||
"coords": fmt.Sprintf("%v", s.Coords),
|
||||
"port": s.Port,
|
||||
"bytes_sent": s.BytesSent,
|
||||
"bytes_recvd": s.BytesRecvd,
|
||||
"proto": s.Protocol,
|
||||
"endpoint": s.Endpoint,
|
||||
"box_pub_key": hex.EncodeToString(s.PublicKey[:]),
|
||||
_ = a.AddHandler("getDHT", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetDHTRequest{}
|
||||
res := &GetDHTResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.getDHTHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Info{"switchpeers": switchpeers}, nil
|
||||
return res, nil
|
||||
})
|
||||
/*
|
||||
a.AddHandler("getSwitchQueues", []string{}, func(in Info) (Info, error) {
|
||||
queues := a.core.GetSwitchQueues()
|
||||
return Info{"switchqueues": queues.asMap()}, nil
|
||||
_ = a.AddHandler("getPaths", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetPathsRequest{}
|
||||
res := &GetPathsResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.getPathsHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
*/
|
||||
a.AddHandler("getDHT", []string{}, func(in Info) (Info, error) {
|
||||
dht := make(Info)
|
||||
for _, d := range a.core.GetDHT() {
|
||||
addr := *address.AddrForNodeID(crypto.GetNodeID(&d.PublicKey))
|
||||
so := net.IP(addr[:]).String()
|
||||
dht[so] = Info{
|
||||
"coords": fmt.Sprintf("%v", d.Coords),
|
||||
"last_seen": d.LastSeen.Seconds(),
|
||||
"box_pub_key": hex.EncodeToString(d.PublicKey[:]),
|
||||
_ = a.AddHandler("getSessions", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetSessionsRequest{}
|
||||
res := &GetSessionsResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.getSessionsHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Info{"dht": dht}, nil
|
||||
})
|
||||
a.AddHandler("getSessions", []string{}, func(in Info) (Info, error) {
|
||||
sessions := make(Info)
|
||||
for _, s := range a.core.GetSessions() {
|
||||
addr := *address.AddrForNodeID(crypto.GetNodeID(&s.PublicKey))
|
||||
so := net.IP(addr[:]).String()
|
||||
sessions[so] = Info{
|
||||
"coords": fmt.Sprintf("%v", s.Coords),
|
||||
"bytes_sent": s.BytesSent,
|
||||
"bytes_recvd": s.BytesRecvd,
|
||||
"mtu": s.MTU,
|
||||
"uptime": s.Uptime.Seconds(),
|
||||
"was_mtu_fixed": s.WasMTUFixed,
|
||||
"box_pub_key": hex.EncodeToString(s.PublicKey[:]),
|
||||
}
|
||||
}
|
||||
return Info{"sessions": sessions}, nil
|
||||
})
|
||||
a.AddHandler("addPeer", []string{"uri", "[interface]"}, func(in Info) (Info, error) {
|
||||
// Set sane defaults
|
||||
intf := ""
|
||||
// Has interface been specified?
|
||||
if itf, ok := in["interface"]; ok {
|
||||
intf = itf.(string)
|
||||
}
|
||||
if a.core.AddPeer(in["uri"].(string), intf) == nil {
|
||||
return Info{
|
||||
"added": []string{
|
||||
in["uri"].(string),
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
return Info{
|
||||
"not_added": []string{
|
||||
in["uri"].(string),
|
||||
},
|
||||
}, errors.New("Failed to add peer")
|
||||
}
|
||||
})
|
||||
a.AddHandler("removePeer", []string{"port"}, func(in Info) (Info, error) {
|
||||
port, err := strconv.ParseInt(fmt.Sprint(in["port"]), 10, 64)
|
||||
if err != nil {
|
||||
return Info{}, err
|
||||
}
|
||||
if a.core.DisconnectPeer(uint64(port)) == nil {
|
||||
return Info{
|
||||
"removed": []string{
|
||||
fmt.Sprint(port),
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
return Info{
|
||||
"not_removed": []string{
|
||||
fmt.Sprint(port),
|
||||
},
|
||||
}, errors.New("Failed to remove peer")
|
||||
}
|
||||
})
|
||||
a.AddHandler("getAllowedEncryptionPublicKeys", []string{}, func(in Info) (Info, error) {
|
||||
return Info{"allowed_box_pubs": a.core.GetAllowedEncryptionPublicKeys()}, nil
|
||||
})
|
||||
a.AddHandler("addAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in Info) (Info, error) {
|
||||
if a.core.AddAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
|
||||
return Info{
|
||||
"added": []string{
|
||||
in["box_pub_key"].(string),
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
return Info{
|
||||
"not_added": []string{
|
||||
in["box_pub_key"].(string),
|
||||
},
|
||||
}, errors.New("Failed to add allowed key")
|
||||
}
|
||||
})
|
||||
a.AddHandler("removeAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in Info) (Info, error) {
|
||||
if a.core.RemoveAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
|
||||
return Info{
|
||||
"removed": []string{
|
||||
in["box_pub_key"].(string),
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
return Info{
|
||||
"not_removed": []string{
|
||||
in["box_pub_key"].(string),
|
||||
},
|
||||
}, errors.New("Failed to remove allowed key")
|
||||
}
|
||||
})
|
||||
a.AddHandler("dhtPing", []string{"box_pub_key", "coords", "[target]"}, func(in Info) (Info, error) {
|
||||
var reserr error
|
||||
var result yggdrasil.DHTRes
|
||||
if in["target"] == nil {
|
||||
in["target"] = "none"
|
||||
}
|
||||
coords := util.DecodeCoordString(in["coords"].(string))
|
||||
var boxPubKey crypto.BoxPubKey
|
||||
if b, err := hex.DecodeString(in["box_pub_key"].(string)); err == nil {
|
||||
copy(boxPubKey[:], b[:])
|
||||
if n, err := hex.DecodeString(in["target"].(string)); err == nil {
|
||||
var targetNodeID crypto.NodeID
|
||||
copy(targetNodeID[:], n[:])
|
||||
result, reserr = a.core.DHTPing(boxPubKey, coords, &targetNodeID)
|
||||
} else {
|
||||
result, reserr = a.core.DHTPing(boxPubKey, coords, nil)
|
||||
}
|
||||
} else {
|
||||
return Info{}, err
|
||||
}
|
||||
if reserr != nil {
|
||||
return Info{}, reserr
|
||||
}
|
||||
infos := make(map[string]map[string]string, len(result.Infos))
|
||||
for _, dinfo := range result.Infos {
|
||||
info := map[string]string{
|
||||
"box_pub_key": hex.EncodeToString(dinfo.PublicKey[:]),
|
||||
"coords": fmt.Sprintf("%v", dinfo.Coords),
|
||||
}
|
||||
addr := net.IP(address.AddrForNodeID(crypto.GetNodeID(&dinfo.PublicKey))[:]).String()
|
||||
infos[addr] = info
|
||||
}
|
||||
return Info{"nodes": infos}, nil
|
||||
})
|
||||
a.AddHandler("getNodeInfo", []string{"[box_pub_key]", "[coords]", "[nocache]"}, func(in Info) (Info, error) {
|
||||
var nocache bool
|
||||
if in["nocache"] != nil {
|
||||
nocache = in["nocache"].(string) == "true"
|
||||
}
|
||||
var boxPubKey crypto.BoxPubKey
|
||||
var coords []uint64
|
||||
if in["box_pub_key"] == nil && in["coords"] == nil {
|
||||
nodeinfo := a.core.MyNodeInfo()
|
||||
var jsoninfo interface{}
|
||||
if err := json.Unmarshal(nodeinfo, &jsoninfo); err != nil {
|
||||
return Info{}, err
|
||||
} else {
|
||||
return Info{"nodeinfo": jsoninfo}, nil
|
||||
}
|
||||
} else if in["box_pub_key"] == nil || in["coords"] == nil {
|
||||
return Info{}, errors.New("Expecting both box_pub_key and coords")
|
||||
} else {
|
||||
if b, err := hex.DecodeString(in["box_pub_key"].(string)); err == nil {
|
||||
copy(boxPubKey[:], b[:])
|
||||
} else {
|
||||
return Info{}, err
|
||||
}
|
||||
coords = util.DecodeCoordString(in["coords"].(string))
|
||||
}
|
||||
result, err := a.core.GetNodeInfo(boxPubKey, coords, nocache)
|
||||
if err == nil {
|
||||
var m map[string]interface{}
|
||||
if err = json.Unmarshal(result, &m); err == nil {
|
||||
return Info{"nodeinfo": m}, nil
|
||||
} else {
|
||||
return Info{}, err
|
||||
}
|
||||
} else {
|
||||
return Info{}, err
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Start runs the admin API socket to listen for / respond to admin API calls.
|
||||
func (a *AdminSocket) Start() error {
|
||||
if a.listenaddr != "none" && a.listenaddr != "" {
|
||||
a.done = make(chan struct{})
|
||||
go a.listen()
|
||||
a.started = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsStarted returns true if the module has been started.
|
||||
func (a *AdminSocket) IsStarted() bool {
|
||||
return a.started
|
||||
select {
|
||||
case <-a.done:
|
||||
// Not blocking, so we're not currently running
|
||||
return false
|
||||
default:
|
||||
// Blocked, so we must have started
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Stop will stop the admin API and close the socket.
|
||||
func (a *AdminSocket) Stop() error {
|
||||
if a.listener != nil {
|
||||
a.started = false
|
||||
return a.listener.Close()
|
||||
} else {
|
||||
return nil
|
||||
select {
|
||||
case <-a.done:
|
||||
default:
|
||||
close(a.done)
|
||||
}
|
||||
return a.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// listen is run by start and manages API connections.
|
||||
|
@ -389,6 +228,14 @@ func (a *AdminSocket) listen() {
|
|||
conn, err := a.listener.Accept()
|
||||
if err == nil {
|
||||
go a.handleRequest(conn)
|
||||
} else {
|
||||
select {
|
||||
case <-a.done:
|
||||
// Not blocked, so we haven't started or have already stopped
|
||||
return
|
||||
default:
|
||||
// Blocked, so we're supposed to keep running
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -396,20 +243,20 @@ func (a *AdminSocket) listen() {
|
|||
// handleRequest calls the request handler for each request sent to the admin API.
|
||||
func (a *AdminSocket) handleRequest(conn net.Conn) {
|
||||
decoder := json.NewDecoder(conn)
|
||||
decoder.DisallowUnknownFields()
|
||||
|
||||
encoder := json.NewEncoder(conn)
|
||||
encoder.SetIndent("", " ")
|
||||
recv := make(Info)
|
||||
send := make(Info)
|
||||
|
||||
defer conn.Close()
|
||||
|
||||
defer func() {
|
||||
r := recover()
|
||||
if r != nil {
|
||||
send = Info{
|
||||
"status": "error",
|
||||
"error": "Check your syntax and input types",
|
||||
}
|
||||
a.log.Debugln("Admin socket error:", r)
|
||||
if err := encoder.Encode(&send); err != nil {
|
||||
if err := encoder.Encode(&ErrorResponse{
|
||||
Error: "Check your syntax and input types",
|
||||
}); err != nil {
|
||||
a.log.Debugln("Admin socket JSON encode error:", err)
|
||||
}
|
||||
conn.Close()
|
||||
|
@ -417,83 +264,39 @@ func (a *AdminSocket) handleRequest(conn net.Conn) {
|
|||
}()
|
||||
|
||||
for {
|
||||
// Start with a clean slate on each request
|
||||
recv = Info{}
|
||||
send = Info{}
|
||||
|
||||
// Decode the input
|
||||
if err := decoder.Decode(&recv); err != nil {
|
||||
a.log.Debugln("Admin socket JSON decode error:", err)
|
||||
return
|
||||
var err error
|
||||
var buf json.RawMessage
|
||||
_ = decoder.Decode(&buf)
|
||||
var resp AdminSocketResponse
|
||||
resp.Status = "success"
|
||||
if err = json.Unmarshal(buf, &resp.Request); err == nil {
|
||||
if resp.Request.Name == "" {
|
||||
resp.Status = "error"
|
||||
resp.Response = &ErrorResponse{
|
||||
Error: "No request specified",
|
||||
}
|
||||
|
||||
// Send the request back with the response, and default to "error"
|
||||
// unless the status is changed below by one of the handlers
|
||||
send["request"] = recv
|
||||
send["status"] = "error"
|
||||
|
||||
n := strings.ToLower(recv["request"].(string))
|
||||
|
||||
if _, ok := recv["request"]; !ok {
|
||||
send["error"] = "No request sent"
|
||||
goto respond
|
||||
}
|
||||
|
||||
if h, ok := a.handlers[n]; ok {
|
||||
// Check that we have all the required arguments
|
||||
for _, arg := range h.args {
|
||||
// An argument in [square brackets] is optional and not required,
|
||||
// so we can safely ignore those
|
||||
if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
|
||||
continue
|
||||
}
|
||||
// Check if the field is missing
|
||||
if _, ok := recv[arg]; !ok {
|
||||
send = Info{
|
||||
"status": "error",
|
||||
"error": "Expected field missing: " + arg,
|
||||
"expecting": arg,
|
||||
}
|
||||
goto respond
|
||||
}
|
||||
}
|
||||
|
||||
// By this point we should have all the fields we need, so call
|
||||
// the handler
|
||||
response, err := h.handler(recv)
|
||||
} else if h, ok := a.handlers[strings.ToLower(resp.Request.Name)]; ok {
|
||||
resp.Response, err = h.handler(buf)
|
||||
if err != nil {
|
||||
send["error"] = err.Error()
|
||||
if response != nil {
|
||||
send["response"] = response
|
||||
goto respond
|
||||
}
|
||||
} else {
|
||||
send["status"] = "success"
|
||||
if response != nil {
|
||||
send["response"] = response
|
||||
goto respond
|
||||
resp.Status = "error"
|
||||
resp.Response = &ErrorResponse{
|
||||
Error: err.Error(),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Start with a clean response on each request, which defaults to an error
|
||||
// state. If a handler is found below then this will be overwritten
|
||||
send = Info{
|
||||
"request": recv,
|
||||
"status": "error",
|
||||
"error": fmt.Sprintf("Unknown action '%s', try 'list' for help", recv["request"].(string)),
|
||||
resp.Status = "error"
|
||||
resp.Response = &ErrorResponse{
|
||||
Error: fmt.Sprintf("Unknown action '%s', try 'list' for help", resp.Request.Name),
|
||||
}
|
||||
goto respond
|
||||
}
|
||||
|
||||
// Send the response back
|
||||
respond:
|
||||
if err := encoder.Encode(&send); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If "keepalive" isn't true then close the connection
|
||||
if keepalive, ok := recv["keepalive"]; !ok || !keepalive.(bool) {
|
||||
conn.Close()
|
||||
if err = encoder.Encode(resp); err != nil {
|
||||
a.log.Debugln("Encode error:", err)
|
||||
}
|
||||
if !resp.Request.KeepAlive {
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
5
src/admin/error.go
Normal file
|
@ -0,0 +1,5 @@
|
|||
package admin
|
||||
|
||||
type ErrorResponse struct {
|
||||
Error string `json:"error"`
|
||||
}
|
34
src/admin/getdht.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
type GetDHTRequest struct{}
|
||||
|
||||
type GetDHTResponse struct {
|
||||
DHT map[string]DHTEntry `json:"dht"`
|
||||
}
|
||||
|
||||
type DHTEntry struct {
|
||||
PublicKey string `json:"key"`
|
||||
Port uint64 `json:"port"`
|
||||
Rest uint64 `json:"rest"`
|
||||
}
|
||||
|
||||
func (a *AdminSocket) getDHTHandler(req *GetDHTRequest, res *GetDHTResponse) error {
|
||||
res.DHT = map[string]DHTEntry{}
|
||||
for _, d := range a.core.GetDHT() {
|
||||
addr := address.AddrForKey(d.Key)
|
||||
so := net.IP(addr[:]).String()
|
||||
res.DHT[so] = DHTEntry{
|
||||
PublicKey: hex.EncodeToString(d.Key[:]),
|
||||
Port: d.Port,
|
||||
Rest: d.Rest,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
33
src/admin/getpaths.go
Normal file
|
@ -0,0 +1,33 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
type GetPathsRequest struct {
|
||||
}
|
||||
|
||||
type GetPathsResponse struct {
|
||||
Paths map[string]PathEntry `json:"paths"`
|
||||
}
|
||||
|
||||
type PathEntry struct {
|
||||
PublicKey string `json:"key"`
|
||||
Path []uint64 `json:"path"`
|
||||
}
|
||||
|
||||
func (a *AdminSocket) getPathsHandler(req *GetPathsRequest, res *GetPathsResponse) error {
|
||||
res.Paths = map[string]PathEntry{}
|
||||
for _, p := range a.core.GetPaths() {
|
||||
addr := address.AddrForKey(p.Key)
|
||||
so := net.IP(addr[:]).String()
|
||||
res.Paths[so] = PathEntry{
|
||||
PublicKey: hex.EncodeToString(p.Key),
|
||||
Path: p.Path,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
35
src/admin/getpeers.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
type GetPeersRequest struct {
|
||||
}
|
||||
|
||||
type GetPeersResponse struct {
|
||||
Peers map[string]PeerEntry `json:"peers"`
|
||||
}
|
||||
|
||||
type PeerEntry struct {
|
||||
PublicKey string `json:"key"`
|
||||
Port uint64 `json:"port"`
|
||||
Coords []uint64 `json:"coords"`
|
||||
}
|
||||
|
||||
func (a *AdminSocket) getPeersHandler(req *GetPeersRequest, res *GetPeersResponse) error {
|
||||
res.Peers = map[string]PeerEntry{}
|
||||
for _, p := range a.core.GetPeers() {
|
||||
addr := address.AddrForKey(p.Key)
|
||||
so := net.IP(addr[:]).String()
|
||||
res.Peers[so] = PeerEntry{
|
||||
PublicKey: hex.EncodeToString(p.Key),
|
||||
Port: p.Port,
|
||||
Coords: p.Coords,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
36
src/admin/getself.go
Normal file
|
@ -0,0 +1,36 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
||||
)
|
||||
|
||||
type GetSelfRequest struct{}
|
||||
|
||||
type GetSelfResponse struct {
|
||||
Self map[string]SelfEntry `json:"self"`
|
||||
}
|
||||
|
||||
type SelfEntry struct {
|
||||
BuildName string `json:"build_name"`
|
||||
BuildVersion string `json:"build_version"`
|
||||
PublicKey string `json:"key"`
|
||||
Coords []uint64 `json:"coords"`
|
||||
Subnet string `json:"subnet"`
|
||||
}
|
||||
|
||||
func (a *AdminSocket) getSelfHandler(req *GetSelfRequest, res *GetSelfResponse) error {
|
||||
res.Self = make(map[string]SelfEntry)
|
||||
self := a.core.GetSelf()
|
||||
addr := a.core.Address().String()
|
||||
snet := a.core.Subnet()
|
||||
res.Self[addr] = SelfEntry{
|
||||
BuildName: version.BuildName(),
|
||||
BuildVersion: version.BuildVersion(),
|
||||
PublicKey: hex.EncodeToString(self.Key[:]),
|
||||
Subnet: snet.String(),
|
||||
Coords: self.Coords,
|
||||
}
|
||||
return nil
|
||||
}
|
30
src/admin/getsessions.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
type GetSessionsRequest struct{}
|
||||
|
||||
type GetSessionsResponse struct {
|
||||
Sessions map[string]SessionEntry `json:"sessions"`
|
||||
}
|
||||
|
||||
type SessionEntry struct {
|
||||
PublicKey string `json:"key"`
|
||||
}
|
||||
|
||||
func (a *AdminSocket) getSessionsHandler(req *GetSessionsRequest, res *GetSessionsResponse) error {
|
||||
res.Sessions = map[string]SessionEntry{}
|
||||
for _, s := range a.core.GetSessions() {
|
||||
addr := address.AddrForKey(s.Key)
|
||||
so := net.IP(addr[:]).String()
|
||||
res.Sessions[so] = SessionEntry{
|
||||
PublicKey: hex.EncodeToString(s.Key[:]),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -17,16 +17,13 @@ configuration option that is not provided.
|
|||
package config
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"sync"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/types"
|
||||
)
|
||||
|
||||
type MTU = types.MTU
|
||||
|
||||
// NodeState represents the active and previous configuration of an Yggdrasil
|
||||
// node. A NodeState object is returned when starting an Yggdrasil node. Note
|
||||
// that this structure and related functions are likely to disappear soon.
|
||||
|
@ -67,18 +64,13 @@ type NodeConfig struct {
|
|||
Listen []string `comment:"Listen addresses for incoming connections. You will need to add\nlisteners in order to accept incoming peerings from non-local nodes.\nMulticast peer discovery will work regardless of any listeners set\nhere. Each listener should be specified in URI format as above, e.g.\ntcp://0.0.0.0:0 or tcp://[::]:0 to listen on all interfaces."`
|
||||
AdminListen string `comment:"Listen address for admin connections. Default is to listen for local\nconnections either on TCP/9001 or a UNIX socket depending on your\nplatform. Use this value for yggdrasilctl -endpoint=X. To disable\nthe admin socket, use the value \"none\" instead."`
|
||||
MulticastInterfaces []string `comment:"Regular expressions for which interfaces multicast peer discovery\nshould be enabled on. If none specified, multicast peer discovery is\ndisabled. The default value is .* which uses all interfaces."`
|
||||
MulticastDNSInterfaces []string `comment:"Regular expressions for which interfaces mDNS peer discovery\nshould be enabled on. If none specified, mDNS peer discovery is\ndisabled. The default value is .* which uses all interfaces."`
|
||||
AllowedEncryptionPublicKeys []string `comment:"List of peer encryption public keys to allow incoming TCP peering\nconnections from. If left empty/undefined then all connections will\nbe allowed by default. This does not affect outgoing peerings, nor\ndoes it affect link-local peers discovered via multicast."`
|
||||
EncryptionPublicKey string `comment:"Your public encryption key. Your peers may ask you for this to put\ninto their AllowedEncryptionPublicKeys configuration."`
|
||||
EncryptionPrivateKey string `comment:"Your private encryption key. DO NOT share this with anyone!"`
|
||||
SigningPublicKey string `comment:"Your public signing key. You should not ordinarily need to share\nthis with anyone."`
|
||||
SigningPrivateKey string `comment:"Your private signing key. DO NOT share this with anyone!"`
|
||||
AllowedPublicKeys []string `comment:"List of peer encryption public keys to allow incoming TCP peering\nconnections from. If left empty/undefined then all connections will\nbe allowed by default. This does not affect outgoing peerings, nor\ndoes it affect link-local peers discovered via multicast."`
|
||||
PublicKey string `comment:"Your public signing key. Your peers may ask you for this to put\ninto their AllowedPublicKeys configuration."`
|
||||
PrivateKey string `comment:"Your private signing key. DO NOT share this with anyone!"`
|
||||
LinkLocalTCPPort uint16 `comment:"The port number to be used for the link-local TCP listeners for the\nconfigured MulticastInterfaces. This option does not affect listeners\nspecified in the Listen option. Unless you plan to firewall link-local\ntraffic, it is best to leave this as the default value of 0. This\noption cannot currently be changed by reloading config during runtime."`
|
||||
IfName string `comment:"Local network interface name for TUN adapter, or \"auto\" to select\nan interface automatically, or \"none\" to run without TUN."`
|
||||
IfMTU MTU `comment:"Maximum Transmission Unit (MTU) size for your local TUN interface.\nDefault is the largest supported size for your platform. The lowest\npossible value is 1280."`
|
||||
IfMTU uint64 `comment:"Maximum Transmission Unit (MTU) size for your local TUN interface.\nDefault is the largest supported size for your platform. The lowest\npossible value is 1280."`
|
||||
SessionFirewall SessionFirewall `comment:"The session firewall controls who can send/receive network traffic\nto/from. This is useful if you want to protect this node without\nresorting to using a real firewall. This does not affect traffic\nbeing routed via this node to somewhere else. Rules are prioritised as\nfollows: blacklist, whitelist, always allow outgoing, direct, remote."`
|
||||
TunnelRouting TunnelRouting `comment:"Allow tunneling non-Yggdrasil traffic over Yggdrasil. This effectively\nallows you to use Yggdrasil to route to, or to bridge other networks,\nsimilar to a VPN tunnel. Tunnelling works between any two nodes and\ndoes not require them to be directly peered."`
|
||||
SwitchOptions SwitchOptions `comment:"Advanced options for tuning the switch. Normally you will not need\nto edit these options."`
|
||||
NodeInfoPrivacy bool `comment:"By default, nodeinfo contains some defaults including the platform,\narchitecture and Yggdrasil version. These can help when surveying\nthe network and diagnosing network routing problems. Enabling\nnodeinfo privacy prevents this, so that only items specified in\n\"NodeInfo\" are sent back if specified."`
|
||||
NodeInfo map[string]interface{} `comment:"Optional node info. This must be a { \"key\": \"value\", ... } map\nor set as null. This is entirely optional but, if set, is visible\nto the whole network on request."`
|
||||
}
|
||||
|
@ -89,24 +81,8 @@ type SessionFirewall struct {
|
|||
AllowFromDirect bool `comment:"Allow network traffic from directly connected peers."`
|
||||
AllowFromRemote bool `comment:"Allow network traffic from remote nodes on the network that you are\nnot directly peered with."`
|
||||
AlwaysAllowOutbound bool `comment:"Allow outbound network traffic regardless of AllowFromDirect or\nAllowFromRemote. This does allow a remote node to send unsolicited\ntraffic back to you for the length of the session."`
|
||||
WhitelistEncryptionPublicKeys []string `comment:"List of public keys from which network traffic is always accepted,\nregardless of AllowFromDirect or AllowFromRemote."`
|
||||
BlacklistEncryptionPublicKeys []string `comment:"List of public keys from which network traffic is always rejected,\nregardless of the whitelist, AllowFromDirect or AllowFromRemote."`
|
||||
}
|
||||
|
||||
// TunnelRouting contains the crypto-key routing tables for tunneling regular
|
||||
// IPv4 or IPv6 subnets across the Yggdrasil network.
|
||||
type TunnelRouting struct {
|
||||
Enable bool `comment:"Enable or disable tunnel routing."`
|
||||
IPv6RemoteSubnets map[string]string `comment:"IPv6 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"aaaa:bbbb:cccc::/e\": \"boxpubkey\", ... }"`
|
||||
IPv6LocalSubnets []string `comment:"IPv6 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges (or the Yggdrasil node's IPv6 address/subnet)\nwill be tunnelled."`
|
||||
IPv4RemoteSubnets map[string]string `comment:"IPv4 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"a.b.c.d/e\": \"boxpubkey\", ... }"`
|
||||
IPv4LocalSubnets []string `comment:"IPv4 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges will be tunnelled."`
|
||||
}
|
||||
|
||||
// SwitchOptions contains tuning options for the switch. These are advanced
|
||||
// options and shouldn't be changed unless necessary.
|
||||
type SwitchOptions struct {
|
||||
MaxTotalQueueSize uint64 `comment:"Maximum size of all switch queues combined (in bytes)."`
|
||||
WhitelistPublicKeys []string `comment:"List of public keys from which network traffic is always accepted,\nregardless of AllowFromDirect or AllowFromRemote."`
|
||||
BlacklistPublicKeys []string `comment:"List of public keys from which network traffic is always rejected,\nregardless of the whitelist, AllowFromDirect or AllowFromRemote."`
|
||||
}
|
||||
|
||||
// Generates default configuration and returns a pointer to the resulting
|
||||
|
@ -114,19 +90,19 @@ type SwitchOptions struct {
|
|||
// using -autoconf.
|
||||
func GenerateConfig() *NodeConfig {
|
||||
// Generate a new key pair.
|
||||
bpub, bpriv := crypto.NewBoxKeys()
|
||||
spub, spriv := crypto.NewSigKeys()
|
||||
spub, spriv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Create a node configuration and populate it.
|
||||
cfg := NodeConfig{}
|
||||
cfg.Listen = []string{}
|
||||
cfg.AdminListen = defaults.GetDefaults().DefaultAdminListen
|
||||
cfg.EncryptionPublicKey = hex.EncodeToString(bpub[:])
|
||||
cfg.EncryptionPrivateKey = hex.EncodeToString(bpriv[:])
|
||||
cfg.SigningPublicKey = hex.EncodeToString(spub[:])
|
||||
cfg.SigningPrivateKey = hex.EncodeToString(spriv[:])
|
||||
cfg.PublicKey = hex.EncodeToString(spub[:])
|
||||
cfg.PrivateKey = hex.EncodeToString(spriv[:])
|
||||
cfg.Peers = []string{}
|
||||
cfg.InterfacePeers = map[string][]string{}
|
||||
cfg.AllowedEncryptionPublicKeys = []string{}
|
||||
cfg.AllowedPublicKeys = []string{}
|
||||
cfg.MulticastInterfaces = defaults.GetDefaults().DefaultMulticastInterfaces
|
||||
cfg.MulticastDNSInterfaces = defaults.GetDefaults().DefaultMulticastDNSInterfaces
|
||||
cfg.IfName = defaults.GetDefaults().DefaultIfName
|
||||
|
@ -135,27 +111,19 @@ func GenerateConfig() *NodeConfig {
|
|||
cfg.SessionFirewall.AllowFromDirect = true
|
||||
cfg.SessionFirewall.AllowFromRemote = true
|
||||
cfg.SessionFirewall.AlwaysAllowOutbound = true
|
||||
cfg.SwitchOptions.MaxTotalQueueSize = 4 * 1024 * 1024
|
||||
cfg.NodeInfoPrivacy = false
|
||||
|
||||
return &cfg
|
||||
}
|
||||
|
||||
// NewEncryptionKeys replaces the encryption keypair in the NodeConfig with a
|
||||
// new encryption keypair. The encryption keys are used by the router to encrypt
|
||||
// traffic and to derive the node ID and IPv6 address/subnet of the node, so
|
||||
// this is equivalent to discarding the node's identity on the network.
|
||||
func (cfg *NodeConfig) NewEncryptionKeys() {
|
||||
bpub, bpriv := crypto.NewBoxKeys()
|
||||
cfg.EncryptionPublicKey = hex.EncodeToString(bpub[:])
|
||||
cfg.EncryptionPrivateKey = hex.EncodeToString(bpriv[:])
|
||||
}
|
||||
|
||||
// NewSigningKeys replaces the signing keypair in the NodeConfig with a new
|
||||
// signing keypair. The signing keys are used by the switch to derive the
|
||||
// structure of the spanning tree.
|
||||
func (cfg *NodeConfig) NewSigningKeys() {
|
||||
spub, spriv := crypto.NewSigKeys()
|
||||
cfg.SigningPublicKey = hex.EncodeToString(spub[:])
|
||||
cfg.SigningPrivateKey = hex.EncodeToString(spriv[:])
|
||||
func (cfg *NodeConfig) NewKeys() {
|
||||
spub, spriv, err := ed25519.GenerateKey(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cfg.PublicKey = hex.EncodeToString(spub[:])
|
||||
cfg.PrivateKey = hex.EncodeToString(spriv[:])
|
||||
}
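A short usage sketch for the consolidated key handling (illustrative only): GenerateConfig already fills in a fresh ed25519 pair, and NewKeys can be called later to discard the node's identity and mint a new one.

package main

import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/config"
)

func main() {
	cfg := config.GenerateConfig() // already contains a fresh ed25519 pair
	fmt.Println("public key:", cfg.PublicKey)
	cfg.NewKeys() // discard the identity above and mint a new one
	fmt.Println("new public key:", cfg.PublicKey)
}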
|
||||
|
|
224
src/core/api.go
Normal file
|
@ -0,0 +1,224 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
//"encoding/hex"
|
||||
//"errors"
|
||||
//"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
//"sort"
|
||||
//"time"
|
||||
|
||||
"github.com/gologme/log"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
//"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
type Self struct {
|
||||
Key ed25519.PublicKey
|
||||
Root ed25519.PublicKey
|
||||
Coords []uint64
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
Key ed25519.PublicKey
|
||||
Root ed25519.PublicKey
|
||||
Coords []uint64
|
||||
Port uint64
|
||||
}
|
||||
|
||||
type DHTEntry struct {
|
||||
Key ed25519.PublicKey
|
||||
Port uint64
|
||||
Rest uint64
|
||||
}
|
||||
|
||||
type PathEntry struct {
|
||||
Key ed25519.PublicKey
|
||||
Path []uint64
|
||||
}
|
||||
|
||||
type Session struct {
|
||||
Key ed25519.PublicKey
|
||||
}
|
||||
|
||||
func (c *Core) GetSelf() Self {
|
||||
var self Self
|
||||
s := c.PacketConn.PacketConn.Debug.GetSelf()
|
||||
self.Key = s.Key
|
||||
self.Root = s.Root
|
||||
self.Coords = s.Coords
|
||||
return self
|
||||
}
|
||||
|
||||
func (c *Core) GetPeers() []Peer {
|
||||
var peers []Peer
|
||||
ps := c.PacketConn.PacketConn.Debug.GetPeers()
|
||||
for _, p := range ps {
|
||||
var info Peer
|
||||
info.Key = p.Key
|
||||
info.Root = p.Root
|
||||
info.Coords = p.Coords
|
||||
info.Port = p.Port
|
||||
peers = append(peers, info)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
func (c *Core) GetDHT() []DHTEntry {
|
||||
var dhts []DHTEntry
|
||||
ds := c.PacketConn.PacketConn.Debug.GetDHT()
|
||||
for _, d := range ds {
|
||||
var info DHTEntry
|
||||
info.Key = d.Key
|
||||
info.Port = d.Port
|
||||
info.Rest = d.Rest
|
||||
dhts = append(dhts, info)
|
||||
}
|
||||
return dhts
|
||||
}
|
||||
|
||||
func (c *Core) GetPaths() []PathEntry {
|
||||
var paths []PathEntry
|
||||
ps := c.PacketConn.PacketConn.Debug.GetPaths()
|
||||
for _, p := range ps {
|
||||
var info PathEntry
|
||||
info.Key = p.Key
|
||||
info.Path = p.Path
|
||||
paths = append(paths, info)
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
func (c *Core) GetSessions() []Session {
|
||||
var sessions []Session
|
||||
ss := c.PacketConn.Debug.GetSessions()
|
||||
for _, s := range ss {
|
||||
var info Session
|
||||
info.Key = s.Key
|
||||
sessions = append(sessions, info)
|
||||
}
|
||||
return sessions
|
||||
}
|
||||
|
||||
// Listen starts a new listener (either TCP or TLS). The input should be a url.URL
|
||||
// parsed from a string of the form e.g. "tcp://a.b.c.d:e". In the case of a
|
||||
// link-local address, the interface should be provided as the second argument.
|
||||
func (c *Core) Listen(u *url.URL, sintf string) (*TcpListener, error) {
|
||||
return c.links.tcp.listenURL(u, sintf)
|
||||
}
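A hedged usage sketch for Listen; the address below is an example value rather than a project default, and the URL is parsed exactly as a Listen entry from the configuration would be.

package example

import (
	"net/url"

	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// startListener is a sketch only; it assumes the Core has already been started.
func startListener(c *core.Core) (*core.TcpListener, error) {
	u, err := url.Parse("tcp://[::]:12345") // example address, not a project default
	if err != nil {
		return nil, err
	}
	// The interface name argument is only needed for link-local addresses.
	return c.Listen(u, "")
}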
|
||||
|
||||
// Address gets the IPv6 address of the Yggdrasil node. This is always a /128
|
||||
// address. The IPv6 address is only relevant when the node is operating as an
|
||||
// IP router and often is meaningless when embedded into an application, unless
|
||||
// that application also implements either VPN functionality or deals with IP
|
||||
// packets specifically.
|
||||
func (c *Core) Address() net.IP {
|
||||
addr := net.IP(address.AddrForKey(c.public)[:])
|
||||
return addr
|
||||
}
|
||||
|
||||
// Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
|
||||
// /64 subnet. The IPv6 subnet is only relevant when the node is operating as an
|
||||
// IP router and often is meaningless when embedded into an application, unless
|
||||
// that application also implements either VPN functionality or deals with IP
|
||||
// packets specifically.
|
||||
func (c *Core) Subnet() net.IPNet {
|
||||
subnet := address.SubnetForKey(c.public)[:]
|
||||
subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
|
||||
}
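Together with Address, this gives an embedding application enough to print its own reachability information; a minimal sketch, assuming a configured and started Core:

package example

import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// printReachability logs the node's /128 address and routed /64 subnet.
func printReachability(c *core.Core) {
	fmt.Println("address:", c.Address().String())
	snet := c.Subnet()
	fmt.Println("subnet: ", snet.String())
}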
|
||||
|
||||
// SetLogger sets the output logger of the Yggdrasil node after startup. This
|
||||
// may be useful if you want to redirect the output later. Note that this
|
||||
// expects a Logger from the github.com/gologme/log package and not from Go's
|
||||
// built-in log package.
|
||||
func (c *Core) SetLogger(log *log.Logger) {
|
||||
c.log = log
|
||||
}
|
||||
|
||||
// AddPeer adds a peer. This should be specified in the peer URI format, e.g.:
|
||||
// tcp://a.b.c.d:e
|
||||
// socks://a.b.c.d:e/f.g.h.i:j
|
||||
// This adds the peer to the peer list, so that they will be called again if the
|
||||
// connection drops.
|
||||
/*
|
||||
func (c *Core) AddPeer(addr string, sintf string) error {
|
||||
if err := c.CallPeer(addr, sintf); err != nil {
|
||||
// TODO: We maybe want this to write the peer to the persistent
|
||||
// configuration even if a connection attempt fails, but first we'll need to
|
||||
// move the code to check the peer URI so that we don't deliberately save a
|
||||
// peer with a known bad URI. Loading peers from config should really do the
|
||||
// same thing too but I don't think that happens today
|
||||
return err
|
||||
}
|
||||
c.config.Mutex.Lock()
|
||||
defer c.config.Mutex.Unlock()
|
||||
if sintf == "" {
|
||||
for _, peer := range c.config.Current.Peers {
|
||||
if peer == addr {
|
||||
return errors.New("peer already added")
|
||||
}
|
||||
}
|
||||
c.config.Current.Peers = append(c.config.Current.Peers, addr)
|
||||
} else {
|
||||
if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
|
||||
for _, peer := range c.config.Current.InterfacePeers[sintf] {
|
||||
if peer == addr {
|
||||
return errors.New("peer already added")
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := c.config.Current.InterfacePeers[sintf]; !ok {
|
||||
c.config.Current.InterfacePeers[sintf] = []string{addr}
|
||||
} else {
|
||||
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf], addr)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
/*
|
||||
func (c *Core) RemovePeer(addr string, sintf string) error {
|
||||
if sintf == "" {
|
||||
for i, peer := range c.config.Current.Peers {
|
||||
if peer == addr {
|
||||
c.config.Current.Peers = append(c.config.Current.Peers[:i], c.config.Current.Peers[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
|
||||
for i, peer := range c.config.Current.InterfacePeers[sintf] {
|
||||
if peer == addr {
|
||||
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf][:i], c.config.Current.InterfacePeers[sintf][i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
panic("TODO") // Get the net.Conn to this peer (if any) and close it
|
||||
c.peers.Act(nil, func() {
|
||||
ports := c.peers.ports
|
||||
for _, peer := range ports {
|
||||
if addr == peer.intf.name() {
|
||||
c.peers._removePeer(peer)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
// CallPeer calls a peer once. This should be specified in the peer URI format,
|
||||
// e.g.:
|
||||
// tcp://a.b.c.d:e
|
||||
// socks://a.b.c.d:e/f.g.h.i:j
|
||||
// This does not add the peer to the peer list, so if the connection drops, the
|
||||
// peer will not be called again automatically.
|
||||
func (c *Core) CallPeer(u *url.URL, sintf string) error {
|
||||
return c.links.call(u, sintf)
|
||||
}
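A sketch of a one-shot outbound connection (the peer URI is a placeholder). Unlike the configured peer list handled in _addPeerLoop, nothing here retries if the link drops; the ?ed25519= query parameter handled in links.call further down can additionally pin the expected remote key.

package example

import (
	"net/url"

	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// dialOnce dials a peer a single time, without adding it to the peer list.
func dialOnce(c *core.Core) error {
	u, err := url.Parse("tcp://192.0.2.1:12345") // placeholder peer address
	if err != nil {
		return err
	}
	return c.CallPeer(u, "") // the interface name only matters for link-local peers
}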
|
|
@ -1,16 +1,19 @@
|
|||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
iw "github.com/Arceliar/ironwood/encrypted"
|
||||
"github.com/Arceliar/phony"
|
||||
"github.com/gologme/log"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
||||
)
|
||||
|
||||
|
@ -21,15 +24,11 @@ type Core struct {
|
|||
// We're going to keep our own copy of the provided config - that way we can
|
||||
// guarantee that it will be covered by the mutex
|
||||
phony.Inbox
|
||||
*iw.PacketConn
|
||||
config config.NodeState // Config
|
||||
boxPub crypto.BoxPubKey
|
||||
boxPriv crypto.BoxPrivKey
|
||||
sigPub crypto.SigPubKey
|
||||
sigPriv crypto.SigPrivKey
|
||||
switchTable switchTable
|
||||
peers peers
|
||||
router router
|
||||
link link
|
||||
secret ed25519.PrivateKey
|
||||
public ed25519.PublicKey
|
||||
links links
|
||||
log *log.Logger
|
||||
addPeerTimer *time.Timer
|
||||
}
|
||||
|
@ -45,42 +44,20 @@ func (c *Core) _init() error {
|
|||
|
||||
current := c.config.GetCurrent()
|
||||
|
||||
boxPrivHex, err := hex.DecodeString(current.EncryptionPrivateKey)
|
||||
sigPriv, err := hex.DecodeString(current.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(boxPrivHex) < crypto.BoxPrivKeyLen {
|
||||
return errors.New("EncryptionPrivateKey is incorrect length")
|
||||
if len(sigPriv) < ed25519.PrivateKeySize {
|
||||
return errors.New("PrivateKey is incorrect length")
|
||||
}
|
||||
|
||||
sigPrivHex, err := hex.DecodeString(current.SigningPrivateKey)
|
||||
if err != nil {
|
||||
c.secret = ed25519.PrivateKey(sigPriv)
|
||||
c.public = c.secret.Public().(ed25519.PublicKey)
|
||||
// TODO check public against current.PublicKey, error if they don't match
|
||||
|
||||
c.PacketConn, err = iw.NewPacketConn(c.secret)
|
||||
return err
|
||||
}
|
||||
if len(sigPrivHex) < crypto.SigPrivKeyLen {
|
||||
return errors.New("SigningPrivateKey is incorrect length")
|
||||
}
|
||||
|
||||
copy(c.boxPriv[:], boxPrivHex)
|
||||
copy(c.sigPriv[:], sigPrivHex)
|
||||
|
||||
boxPub, sigPub := c.boxPriv.Public(), c.sigPriv.Public()
|
||||
|
||||
copy(c.boxPub[:], boxPub[:])
|
||||
copy(c.sigPub[:], sigPub[:])
|
||||
|
||||
if bp := hex.EncodeToString(c.boxPub[:]); current.EncryptionPublicKey != bp {
|
||||
c.log.Warnln("EncryptionPublicKey in config is incorrect, should be", bp)
|
||||
}
|
||||
if sp := hex.EncodeToString(c.sigPub[:]); current.SigningPublicKey != sp {
|
||||
c.log.Warnln("SigningPublicKey in config is incorrect, should be", sp)
|
||||
}
|
||||
|
||||
c.peers.init(c)
|
||||
c.router.init(c)
|
||||
c.switchTable.init(c) // TODO move before peers? before router?
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If any static peers were provided in the configuration above then we should
|
||||
|
@ -92,8 +69,12 @@ func (c *Core) _addPeerLoop() {
|
|||
|
||||
// Add peers from the Peers section
|
||||
for _, peer := range current.Peers {
|
||||
go func(peer, intf string) {
|
||||
if err := c.CallPeer(peer, intf); err != nil {
|
||||
go func(peer string, intf string) {
|
||||
u, err := url.Parse(peer)
|
||||
if err != nil {
|
||||
c.log.Errorln("Failed to parse peer url:", peer, err)
|
||||
}
|
||||
if err := c.CallPeer(u, intf); err != nil {
|
||||
c.log.Errorln("Failed to add peer:", err)
|
||||
}
|
||||
}(peer, "") // TODO: this should be acted and not in a goroutine?
|
||||
|
@ -102,33 +83,23 @@ func (c *Core) _addPeerLoop() {
|
|||
// Add peers from the InterfacePeers section
|
||||
for intf, intfpeers := range current.InterfacePeers {
|
||||
for _, peer := range intfpeers {
|
||||
go func(peer, intf string) {
|
||||
if err := c.CallPeer(peer, intf); err != nil {
|
||||
go func(peer string, intf string) {
|
||||
u, err := url.Parse(peer)
|
||||
if err != nil {
|
||||
c.log.Errorln("Failed to parse peer url:", peer, err)
|
||||
}
|
||||
if err := c.CallPeer(u, intf); err != nil {
|
||||
c.log.Errorln("Failed to add peer:", err)
|
||||
}
|
||||
}(peer, intf) // TODO: this should be acted and not in a goroutine?
|
||||
}
|
||||
}
|
||||
|
||||
if c.addPeerTimer != nil {
|
||||
c.addPeerTimer = time.AfterFunc(time.Minute, func() {
|
||||
c.Act(nil, c._addPeerLoop)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateConfig updates the configuration in Core with the provided
|
||||
// config.NodeConfig and then signals the various module goroutines to
|
||||
// reconfigure themselves if needed.
|
||||
func (c *Core) UpdateConfig(config *config.NodeConfig) {
|
||||
c.Act(nil, func() {
|
||||
c.log.Debugln("Reloading node configuration...")
|
||||
|
||||
// Replace the active configuration with the supplied one
|
||||
c.config.Replace(*config)
|
||||
|
||||
// Notify the router and switch about the new configuration
|
||||
c.router.Act(c, c.router.reconfigure)
|
||||
c.switchTable.Act(c, c.switchTable.reconfigure)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts up Yggdrasil using the provided config.NodeConfig, and outputs
|
||||
|
@ -165,22 +136,24 @@ func (c *Core) _start(nc *config.NodeConfig, log *log.Logger) (*config.NodeState
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.link.init(c); err != nil {
|
||||
if err := c.links.init(c); err != nil {
|
||||
c.log.Errorln("Failed to start link interfaces")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.switchTable.start(); err != nil {
|
||||
c.log.Errorln("Failed to start switch")
|
||||
return nil, err
|
||||
}
|
||||
//if err := c.switchTable.start(); err != nil {
|
||||
// c.log.Errorln("Failed to start switch")
|
||||
// return nil, err
|
||||
//}
|
||||
|
||||
if err := c.router.start(); err != nil {
|
||||
c.log.Errorln("Failed to start router")
|
||||
return nil, err
|
||||
}
|
||||
//if err := c.router.start(); err != nil {
|
||||
// c.log.Errorln("Failed to start router")
|
||||
// return nil, err
|
||||
//}
|
||||
|
||||
c.Act(c, c._addPeerLoop)
|
||||
c.addPeerTimer = time.AfterFunc(0, func() {
|
||||
c.Act(nil, c._addPeerLoop)
|
||||
})
|
||||
|
||||
c.log.Infoln("Startup complete")
|
||||
return &c.config, nil
|
||||
|
@ -193,13 +166,17 @@ func (c *Core) Stop() {
|
|||
|
||||
// This function is unsafe and should only be run by the core actor.
|
||||
func (c *Core) _stop() {
|
||||
c.PacketConn.Close()
|
||||
c.log.Infoln("Stopping...")
|
||||
if c.addPeerTimer != nil {
|
||||
c.addPeerTimer.Stop()
|
||||
c.addPeerTimer = nil
|
||||
}
|
||||
c.link.stop()
|
||||
c.links.stop()
|
||||
/* FIXME this deadlocks, need a waitgroup or something to coordinate shutdown
|
||||
for _, peer := range c.GetPeers() {
|
||||
c.DisconnectPeer(peer.Port)
|
||||
}
|
||||
*/
|
||||
c.log.Infoln("Stopped")
|
||||
}
|
|
@ -1,8 +1,9 @@
|
|||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -50,10 +51,16 @@ func CreateAndConnectTwo(t testing.TB, verbose bool) (nodeA *Core, nodeB *Core)
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = nodeB.AddPeer("tcp://"+nodeA.link.tcp.getAddr().String(), "")
|
||||
u, err := url.Parse("tcp://" + nodeA.links.tcp.getAddr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = nodeB.CallPeer(u, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if l := len(nodeA.GetPeers()); l != 1 {
|
||||
t.Fatal("unexpected number of peers", l)
|
||||
|
@ -70,7 +77,7 @@ func WaitConnected(nodeA, nodeB *Core) bool {
|
|||
// It may take up to 3 seconds, but let's wait 5.
|
||||
for i := 0; i < 50; i++ {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if len(nodeA.GetSwitchPeers()) > 0 && len(nodeB.GetSwitchPeers()) > 0 {
|
||||
if len(nodeA.GetPeers()) > 0 && len(nodeB.GetPeers()) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -80,26 +87,12 @@ func WaitConnected(nodeA, nodeB *Core) bool {
|
|||
// CreateEchoListener creates a routine listening on nodeA. It expects to receive the number of messages given by repeats, each of length bufLen.
|
||||
// It returns a channel used to synchronize the routine with the caller.
|
||||
func CreateEchoListener(t testing.TB, nodeA *Core, bufLen int, repeats int) chan struct{} {
|
||||
// Listen. Doing it here guarantees that there will be something to try to connect when it returns.
|
||||
listener, err := nodeA.ConnListen()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Start routine
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer listener.Close()
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
buf := make([]byte, bufLen)
|
||||
|
||||
for i := 0; i < repeats; i++ {
|
||||
n, err := conn.Read(buf)
|
||||
n, from, err := nodeA.ReadFrom(buf)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
|
@ -108,7 +101,7 @@ func CreateEchoListener(t testing.TB, nodeA *Core, bufLen int, repeats int) chan
|
|||
t.Error("missing data")
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(buf)
|
||||
_, err = nodeA.WriteTo(buf, from)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@ -127,6 +120,8 @@ func TestCore_Start_Connect(t *testing.T) {
|
|||
// TestCore_Start_Transfer checks that messages can be passed between nodes (in both directions).
|
||||
func TestCore_Start_Transfer(t *testing.T) {
|
||||
nodeA, nodeB := CreateAndConnectTwo(t, true)
|
||||
defer nodeA.Stop()
|
||||
defer nodeB.Stop()
|
||||
|
||||
msgLen := 1500
|
||||
done := CreateEchoListener(t, nodeA, msgLen, 1)
|
||||
|
@ -135,28 +130,19 @@ func TestCore_Start_Transfer(t *testing.T) {
|
|||
t.Fatal("nodes did not connect")
|
||||
}
|
||||
|
||||
// Dial
|
||||
dialer, err := nodeB.ConnDialer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
conn, err := dialer.Dial("nodeid", nodeA.NodeID().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
// Send
|
||||
msg := make([]byte, msgLen)
|
||||
rand.Read(msg)
|
||||
conn.Write(msg)
|
||||
_, err := nodeB.WriteTo(msg, nodeA.LocalAddr())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buf := make([]byte, msgLen)
|
||||
_, err = conn.Read(buf)
|
||||
_, _, err = nodeB.ReadFrom(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Compare(msg, buf) != 0 {
|
||||
if !bytes.Equal(msg, buf) {
|
||||
t.Fatal("expected echo")
|
||||
}
|
||||
<-done
|
||||
|
@ -173,29 +159,20 @@ func BenchmarkCore_Start_Transfer(b *testing.B) {
		b.Fatal("nodes did not connect")
	}

	// Dial
	dialer, err := nodeB.ConnDialer()
	if err != nil {
		b.Fatal(err)
	}
	conn, err := dialer.Dial("nodeid", nodeA.NodeID().String())
	if err != nil {
		b.Fatal(err)
	}
	defer conn.Close()
	// Send
	msg := make([]byte, msgLen)
	rand.Read(msg)
	buf := make([]byte, msgLen)

	b.SetBytes(int64(b.N * msgLen))
	b.SetBytes(int64(msgLen))
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		conn.Write(msg)
		_, err := nodeB.WriteTo(msg, nodeA.LocalAddr())
		if err != nil {
			b.Fatal(err)
		}
		_, err = conn.Read(buf)
		_, _, err = nodeB.ReadFrom(buf)
		if err != nil {
			b.Fatal(err)
		}
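The updated test and benchmark above replace the old ConnDialer/ConnListen flow with a PacketConn-style WriteTo/ReadFrom exchange addressed by the remote node's address. As a rough illustration only (the Core setup helpers such as CreateAndConnectTwo are test-local, and error handling is trimmed), a single round trip between two already-peered cores might look like this sketch:

// Sketch: echo one packet from nodeB to nodeA and back, assuming both
// cores are already peered (for example via the test helpers in this file).
func echoOnce(nodeA, nodeB *Core, payload []byte) ([]byte, error) {
	// Send towards nodeA, using its overlay address as the destination.
	if _, err := nodeB.WriteTo(payload, nodeA.LocalAddr()); err != nil {
		return nil, err
	}
	// nodeA reads the packet and learns who sent it...
	buf := make([]byte, len(payload))
	n, from, err := nodeA.ReadFrom(buf)
	if err != nil {
		return nil, err
	}
	// ...and writes the same bytes straight back to the sender.
	if _, err := nodeA.WriteTo(buf[:n], from); err != nil {
		return nil, err
	}
	// Finally nodeB reads the echoed copy.
	out := make([]byte, len(payload))
	n, _, err = nodeB.ReadFrom(out)
	return out[:n], err
}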
@ -1,6 +1,6 @@
// +build debug

package yggdrasil
package core

import "fmt"

@ -1,5 +1,5 @@
/*
Package yggdrasil implements the core functionality of the Yggdrasil Network.
Package core implements the core functionality of the Yggdrasil Network.

Introduction

@ -34,11 +34,11 @@ This may look something like this:
	"os"
	"github.com/gologme/log"
	"github.com/yggdrasil-network/yggdrasil-go/src/config"
	"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

type node struct {
	core   yggdrasil.Core
	core   core.Core
	config *config.NodeConfig
	log    *log.Logger
}

@ -173,4 +173,4 @@ then you should manually implement acknowledgement and retransmission of
messages.

*/
package yggdrasil
package core
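The doc comment's example now imports src/core and embeds core.Core rather than yggdrasil.Core. A minimal embedding in the style of that example might look like the sketch below; the call to config.GenerateConfig and the gologme logger constructor are assumptions based on how the main yggdrasil executable is typically wired, not part of this change.

package main

import (
	"os"

	"github.com/gologme/log"

	"github.com/yggdrasil-network/yggdrasil-go/src/config"
	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// node mirrors the doc.go example above: the application owns a core.Core
// alongside its configuration and logger.
type node struct {
	core   core.Core
	config *config.NodeConfig
	log    *log.Logger
}

func main() {
	n := &node{
		config: config.GenerateConfig(), // assumption: generate a fresh config as the yggdrasil executable does
		log:    log.New(os.Stdout, "", log.Flags()),
	}
	_ = n // starting and stopping the core is described in the doc text above
}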
262 src/core/link.go Normal file
@ -0,0 +1,262 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
//"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"golang.org/x/net/proxy"
|
||||
//"github.com/Arceliar/phony" // TODO? use instead of mutexes
|
||||
)
|
||||
|
||||
type keyArray [ed25519.PublicKeySize]byte
|
||||
|
||||
type links struct {
|
||||
core *Core
|
||||
mutex sync.RWMutex // protects links below
|
||||
links map[linkInfo]*link
|
||||
tcp tcp // TCP interface support
|
||||
stopped chan struct{}
|
||||
// TODO timeout (to remove from switch), read from config.ReadTimeout
|
||||
}
|
||||
|
||||
// linkInfo is used as a map key
|
||||
type linkInfo struct {
|
||||
key keyArray
|
||||
linkType string // Type of link, e.g. TCP, AWDL
|
||||
local string // Local name or address
|
||||
remote string // Remote name or address
|
||||
}
|
||||
|
||||
type link struct {
|
||||
lname string
|
||||
links *links
|
||||
conn net.Conn
|
||||
options linkOptions
|
||||
info linkInfo
|
||||
incoming bool
|
||||
force bool
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
type linkOptions struct {
|
||||
pinnedEd25519Keys map[keyArray]struct{}
|
||||
}
|
||||
|
||||
func (l *links) init(c *Core) error {
|
||||
l.core = c
|
||||
l.mutex.Lock()
|
||||
l.links = make(map[linkInfo]*link)
|
||||
l.mutex.Unlock()
|
||||
l.stopped = make(chan struct{})
|
||||
|
||||
if err := l.tcp.init(l); err != nil {
|
||||
c.log.Errorln("Failed to start TCP interface")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *links) call(u *url.URL, sintf string) error {
|
||||
//u, err := url.Parse(uri)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("peer %s is not correctly formatted (%s)", uri, err)
|
||||
//}
|
||||
tcpOpts := tcpOptions{}
|
||||
if pubkeys, ok := u.Query()["ed25519"]; ok && len(pubkeys) > 0 {
|
||||
tcpOpts.pinnedEd25519Keys = make(map[keyArray]struct{})
|
||||
for _, pubkey := range pubkeys {
|
||||
if sigPub, err := hex.DecodeString(pubkey); err == nil {
|
||||
var sigPubKey keyArray
|
||||
copy(sigPubKey[:], sigPub)
|
||||
tcpOpts.pinnedEd25519Keys[sigPubKey] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "tcp":
|
||||
l.tcp.call(u.Host, tcpOpts, sintf)
|
||||
case "socks":
|
||||
tcpOpts.socksProxyAddr = u.Host
|
||||
if u.User != nil {
|
||||
tcpOpts.socksProxyAuth = &proxy.Auth{}
|
||||
tcpOpts.socksProxyAuth.User = u.User.Username()
|
||||
tcpOpts.socksProxyAuth.Password, _ = u.User.Password()
|
||||
}
|
||||
pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
|
||||
l.tcp.call(pathtokens[0], tcpOpts, sintf)
|
||||
case "tls":
|
||||
tcpOpts.upgrade = l.tcp.tls.forDialer
|
||||
l.tcp.call(u.Host, tcpOpts, sintf)
|
||||
default:
|
||||
return errors.New("unknown call scheme: " + u.Scheme)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *links) create(conn net.Conn, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*link, error) {
|
||||
// Technically anything unique would work for names, but let's pick something human readable, just for debugging
|
||||
intf := link{
|
||||
conn: conn,
|
||||
lname: name,
|
||||
links: l,
|
||||
options: options,
|
||||
info: linkInfo{
|
||||
linkType: linkType,
|
||||
local: local,
|
||||
remote: remote,
|
||||
},
|
||||
incoming: incoming,
|
||||
force: force,
|
||||
}
|
||||
return &intf, nil
|
||||
}
|
||||
|
||||
func (l *links) stop() error {
|
||||
close(l.stopped)
|
||||
if err := l.tcp.stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (intf *link) handler() (chan struct{}, error) {
|
||||
// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
|
||||
defer intf.conn.Close()
|
||||
meta := version_getBaseMetadata()
|
||||
meta.key = intf.links.core.public
|
||||
metaBytes := meta.encode()
|
||||
// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
|
||||
var err error
|
||||
if !util.FuncTimeout(30*time.Second, func() {
|
||||
var n int
|
||||
n, err = intf.conn.Write(metaBytes)
|
||||
if err == nil && n != len(metaBytes) {
|
||||
err = errors.New("incomplete metadata send")
|
||||
}
|
||||
}) {
|
||||
return nil, errors.New("timeout on metadata send")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !util.FuncTimeout(30*time.Second, func() {
|
||||
var n int
|
||||
n, err = io.ReadFull(intf.conn, metaBytes)
|
||||
if err == nil && n != len(metaBytes) {
|
||||
err = errors.New("incomplete metadata recv")
|
||||
}
|
||||
}) {
|
||||
return nil, errors.New("timeout on metadata recv")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
meta = version_metadata{}
|
||||
base := version_getBaseMetadata()
|
||||
if !meta.decode(metaBytes) {
|
||||
return nil, errors.New("failed to decode metadata")
|
||||
}
|
||||
if !meta.check() {
|
||||
intf.links.core.log.Errorf("Failed to connect to node: %s is incompatible version (local %s, remote %s)",
|
||||
intf.lname,
|
||||
fmt.Sprintf("%d.%d", base.ver, base.minorVer),
|
||||
fmt.Sprintf("%d.%d", meta.ver, meta.minorVer),
|
||||
)
|
||||
return nil, errors.New("remote node is incompatible version")
|
||||
}
|
||||
// Check if the remote side matches the keys we expected. This is a bit of a weak
|
||||
// check - in future versions we really should check a signature or something like that.
|
||||
if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
|
||||
var key keyArray
|
||||
copy(key[:], meta.key)
|
||||
if _, allowed := pinned[key]; !allowed {
|
||||
intf.links.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name())
|
||||
return nil, fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
|
||||
}
|
||||
}
|
||||
// Check if we're authorized to connect to this key / IP
|
||||
allowed := intf.links.core.config.GetCurrent().AllowedPublicKeys
|
||||
isallowed := len(allowed) == 0
|
||||
for _, k := range allowed {
|
||||
if k == hex.EncodeToString(meta.key) { // TODO: this is yuck
|
||||
isallowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if intf.incoming && !intf.force && !isallowed {
|
||||
intf.links.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
|
||||
strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.key))
|
||||
intf.close()
|
||||
return nil, nil
|
||||
}
|
||||
// Check if we already have a link to this node
|
||||
copy(intf.info.key[:], meta.key)
|
||||
intf.links.mutex.Lock()
|
||||
if oldIntf, isIn := intf.links.links[intf.info]; isIn {
|
||||
intf.links.mutex.Unlock()
|
||||
// FIXME we should really return an error and let the caller block instead
|
||||
// That lets them do things like close connections on its own, avoid printing a connection message in the first place, etc.
|
||||
intf.links.core.log.Debugln("DEBUG: found existing interface for", intf.name())
|
||||
return oldIntf.closed, nil
|
||||
} else {
|
||||
intf.closed = make(chan struct{})
|
||||
intf.links.links[intf.info] = intf
|
||||
defer func() {
|
||||
intf.links.mutex.Lock()
|
||||
delete(intf.links.links, intf.info)
|
||||
intf.links.mutex.Unlock()
|
||||
close(intf.closed)
|
||||
}()
|
||||
intf.links.core.log.Debugln("DEBUG: registered interface for", intf.name())
|
||||
}
|
||||
intf.links.mutex.Unlock()
|
||||
themAddr := address.AddrForKey(ed25519.PublicKey(intf.info.key[:]))
|
||||
themAddrString := net.IP(themAddr[:]).String()
|
||||
themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
|
||||
intf.links.core.log.Infof("Connected %s: %s, source %s",
|
||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
||||
// Run the handler
|
||||
err = intf.links.core.PacketConn.HandleConn(ed25519.PublicKey(intf.info.key[:]), intf.conn)
|
||||
// TODO don't report an error if it's just a 'use of closed network connection'
|
||||
if err != nil {
|
||||
intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
|
||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local, err)
|
||||
} else {
|
||||
intf.links.core.log.Infof("Disconnected %s: %s, source %s",
|
||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (intf *link) close() {
|
||||
intf.conn.Close()
|
||||
}
|
||||
|
||||
func (intf *link) name() string {
|
||||
return intf.lname
|
||||
}
|
||||
|
||||
func (intf *link) local() string {
|
||||
return intf.info.local
|
||||
}
|
||||
|
||||
func (intf *link) remote() string {
|
||||
return intf.info.remote
|
||||
}
|
||||
|
||||
func (intf *link) interfaceType() string {
|
||||
return intf.info.linkType
|
||||
}
|
|
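links.call in the new link.go pins peers by reading one or more ed25519=<hex public key> query parameters from the peer URI before dispatching to the tcp, socks or tls handlers. A rough, standalone sketch of what that parsing boils down to, using only the standard library rather than the package's internal types:

package main

import (
	"crypto/ed25519"
	"encoding/hex"
	"fmt"
	"net/url"
)

// pinnedKeysFromURI mimics the ed25519 query handling in links.call: every
// well-formed hex key becomes an entry in the allowed-key set. Keys that
// fail to decode are skipped, as in the original; the length check here is
// an extra safety net for the sketch.
func pinnedKeysFromURI(uri string) (map[[ed25519.PublicKeySize]byte]struct{}, error) {
	u, err := url.Parse(uri)
	if err != nil {
		return nil, err
	}
	pinned := make(map[[ed25519.PublicKeySize]byte]struct{})
	for _, pubkey := range u.Query()["ed25519"] {
		raw, err := hex.DecodeString(pubkey)
		if err != nil || len(raw) != ed25519.PublicKeySize {
			continue
		}
		var key [ed25519.PublicKeySize]byte
		copy(key[:], raw)
		pinned[key] = struct{}{}
	}
	return pinned, nil
}

func main() {
	uri := "tcp://192.0.2.1:9001?ed25519=" + hex.EncodeToString(make([]byte, ed25519.PublicKeySize))
	keys, _ := pinnedKeysFromURI(uri)
	fmt.Println("pinned keys:", len(keys)) // prints 1 for this example URI
}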
@ -1,4 +1,4 @@
|
|||
package yggdrasil
|
||||
package core
|
||||
|
||||
// This sends packets to peers using TCP as a transport
|
||||
// It's generally better tested than the UDP implementation
|
||||
|
@ -19,21 +19,22 @@ import (
|
|||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/proxy"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
)
|
||||
|
||||
const default_timeout = 6 * time.Second
|
||||
const tcp_ping_interval = (default_timeout * 2 / 3)
|
||||
|
||||
// The TCP listener and information about active TCP connections, to avoid duplication.
|
||||
type tcp struct {
|
||||
link *link
|
||||
links *links
|
||||
waitgroup sync.WaitGroup
|
||||
mutex sync.Mutex // Protecting the below
|
||||
listeners map[string]*TcpListener
|
||||
|
@ -48,7 +49,7 @@ type tcp struct {
|
|||
// multicast interfaces.
|
||||
type TcpListener struct {
|
||||
Listener net.Listener
|
||||
upgrade *TcpUpgrade
|
||||
opts tcpOptions
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
|
@ -94,8 +95,8 @@ func (t *tcp) getAddr() *net.TCPAddr {
|
|||
}
|
||||
|
||||
// Initializes the struct.
|
||||
func (t *tcp) init(l *link) error {
|
||||
t.link = l
|
||||
func (t *tcp) init(l *links) error {
|
||||
t.links = l
|
||||
t.tls.init(t)
|
||||
t.mutex.Lock()
|
||||
t.calls = make(map[string]struct{})
|
||||
|
@ -103,21 +104,16 @@ func (t *tcp) init(l *link) error {
|
|||
t.listeners = make(map[string]*TcpListener)
|
||||
t.mutex.Unlock()
|
||||
|
||||
t.link.core.config.Mutex.RLock()
|
||||
defer t.link.core.config.Mutex.RUnlock()
|
||||
for _, listenaddr := range t.link.core.config.Current.Listen {
|
||||
switch listenaddr[:6] {
|
||||
case "tcp://":
|
||||
if _, err := t.listen(listenaddr[6:], nil); err != nil {
|
||||
return err
|
||||
t.links.core.config.Mutex.RLock()
|
||||
defer t.links.core.config.Mutex.RUnlock()
|
||||
for _, listenaddr := range t.links.core.config.Current.Listen {
|
||||
u, err := url.Parse(listenaddr)
|
||||
if err != nil {
|
||||
t.links.core.log.Errorln("Failed to parse listener: listener", listenaddr, "is not correctly formatted, ignoring")
|
||||
}
|
||||
case "tls://":
|
||||
if _, err := t.listen(listenaddr[6:], t.tls.forListener); err != nil {
|
||||
if _, err := t.listenURL(u, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
t.link.core.log.Errorln("Failed to add listener: listener", listenaddr, "is not correctly formatted, ignoring")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -133,41 +129,25 @@ func (t *tcp) stop() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *tcp) reconfigure() {
|
||||
t.link.core.config.Mutex.RLock()
|
||||
added := util.Difference(t.link.core.config.Current.Listen, t.link.core.config.Previous.Listen)
|
||||
deleted := util.Difference(t.link.core.config.Previous.Listen, t.link.core.config.Current.Listen)
|
||||
t.link.core.config.Mutex.RUnlock()
|
||||
if len(added) > 0 || len(deleted) > 0 {
|
||||
for _, a := range added {
|
||||
switch a[:6] {
|
||||
case "tcp://":
|
||||
if _, err := t.listen(a[6:], nil); err != nil {
|
||||
t.link.core.log.Errorln("Error adding TCP", a[6:], "listener:", err)
|
||||
func (t *tcp) listenURL(u *url.URL, sintf string) (*TcpListener, error) {
|
||||
var listener *TcpListener
|
||||
var err error
|
||||
hostport := u.Host // Used for tcp and tls
|
||||
if len(sintf) != 0 {
|
||||
host, port, err := net.SplitHostPort(hostport)
|
||||
if err == nil {
|
||||
hostport = fmt.Sprintf("[%s%%%s]:%s", host, sintf, port)
|
||||
}
|
||||
case "tls://":
|
||||
if _, err := t.listen(a[6:], t.tls.forListener); err != nil {
|
||||
t.link.core.log.Errorln("Error adding TLS", a[6:], "listener:", err)
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "tcp":
|
||||
listener, err = t.listen(hostport, nil)
|
||||
case "tls":
|
||||
listener, err = t.listen(hostport, t.tls.forListener)
|
||||
default:
|
||||
t.link.core.log.Errorln("Failed to add listener: listener", a, "is not correctly formatted, ignoring")
|
||||
}
|
||||
}
|
||||
for _, d := range deleted {
|
||||
if d[:6] != "tcp://" && d[:6] != "tls://" {
|
||||
t.link.core.log.Errorln("Failed to delete listener: listener", d, "is not correctly formatted, ignoring")
|
||||
continue
|
||||
}
|
||||
t.mutex.Lock()
|
||||
if listener, ok := t.listeners[d[6:]]; ok {
|
||||
t.mutex.Unlock()
|
||||
listener.Stop()
|
||||
t.link.core.log.Infoln("Stopped TCP listener:", d[6:])
|
||||
} else {
|
||||
t.mutex.Unlock()
|
||||
}
|
||||
}
|
||||
t.links.core.log.Errorln("Failed to add listener: listener", u.String(), "is not correctly formatted, ignoring")
|
||||
}
|
||||
return listener, err
|
||||
}
|
||||
|
||||
func (t *tcp) listen(listenaddr string, upgrade *TcpUpgrade) (*TcpListener, error) {
|
||||
|
@ -181,7 +161,7 @@ func (t *tcp) listen(listenaddr string, upgrade *TcpUpgrade) (*TcpListener, erro
|
|||
if err == nil {
|
||||
l := TcpListener{
|
||||
Listener: listener,
|
||||
upgrade: upgrade,
|
||||
opts: tcpOptions{upgrade: upgrade},
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
t.waitgroup.Add(1)
|
||||
|
@ -204,19 +184,18 @@ func (t *tcp) listener(l *TcpListener, listenaddr string) {
|
|||
t.mutex.Unlock()
|
||||
l.Listener.Close()
|
||||
return
|
||||
} else {
|
||||
}
|
||||
t.listeners[listenaddr] = l
|
||||
t.mutex.Unlock()
|
||||
}
|
||||
// And here we go!
|
||||
defer func() {
|
||||
t.link.core.log.Infoln("Stopping TCP listener on:", l.Listener.Addr().String())
|
||||
t.links.core.log.Infoln("Stopping TCP listener on:", l.Listener.Addr().String())
|
||||
l.Listener.Close()
|
||||
t.mutex.Lock()
|
||||
delete(t.listeners, listenaddr)
|
||||
t.mutex.Unlock()
|
||||
}()
|
||||
t.link.core.log.Infoln("Listening for TCP on:", l.Listener.Addr().String())
|
||||
t.links.core.log.Infoln("Listening for TCP on:", l.Listener.Addr().String())
|
||||
go func() {
|
||||
<-l.stop
|
||||
l.Listener.Close()
|
||||
|
@ -225,13 +204,17 @@ func (t *tcp) listener(l *TcpListener, listenaddr string) {
|
|||
for {
|
||||
sock, err := l.Listener.Accept()
|
||||
if err != nil {
|
||||
t.link.core.log.Errorln("Failed to accept connection:", err)
|
||||
t.links.core.log.Errorln("Failed to accept connection:", err)
|
||||
select {
|
||||
case <-l.stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
time.Sleep(time.Second) // So we don't busy loop
|
||||
continue
|
||||
}
|
||||
t.waitgroup.Add(1)
|
||||
options := tcpOptions{
|
||||
upgrade: l.upgrade,
|
||||
}
|
||||
options := l.opts
|
||||
go t.handler(sock, true, options)
|
||||
}
|
||||
}
|
||||
|
@ -293,7 +276,9 @@ func (t *tcp) call(saddr string, options tcpOptions, sintf string) {
|
|||
}
|
||||
t.waitgroup.Add(1)
|
||||
options.socksPeerAddr = conn.RemoteAddr().String()
|
||||
t.handler(conn, false, options)
|
||||
if ch := t.handler(conn, false, options); ch != nil {
|
||||
<-ch
|
||||
}
|
||||
} else {
|
||||
dst, err := net.ResolveTCPAddr("tcp", saddr)
|
||||
if err != nil {
|
||||
|
@ -355,16 +340,18 @@ func (t *tcp) call(saddr string, options tcpOptions, sintf string) {
|
|||
}
|
||||
conn, err = dialer.Dial("tcp", dst.String())
|
||||
if err != nil {
|
||||
t.link.core.log.Debugf("Failed to dial %s: %s", callproto, err)
|
||||
t.links.core.log.Debugf("Failed to dial %s: %s", callproto, err)
|
||||
return
|
||||
}
|
||||
t.waitgroup.Add(1)
|
||||
t.handler(conn, false, options)
|
||||
if ch := t.handler(conn, false, options); ch != nil {
|
||||
<-ch
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) {
|
||||
func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) chan struct{} {
|
||||
defer t.waitgroup.Done() // Happens after sock.close
|
||||
defer sock.Close()
|
||||
t.setExtraOptions(sock)
|
||||
|
@ -372,14 +359,11 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) {
|
|||
if options.upgrade != nil {
|
||||
var err error
|
||||
if sock, err = options.upgrade.upgrade(sock); err != nil {
|
||||
t.link.core.log.Errorln("TCP handler upgrade failed:", err)
|
||||
return
|
||||
} else {
|
||||
t.links.core.log.Errorln("TCP handler upgrade failed:", err)
|
||||
return nil
|
||||
}
|
||||
upgraded = true
|
||||
}
|
||||
}
|
||||
stream := stream{}
|
||||
stream.init(sock)
|
||||
var name, proto, local, remote string
|
||||
if options.socksProxyAddr != "" {
|
||||
name = "socks://" + sock.RemoteAddr().String() + "/" + options.socksPeerAddr
|
||||
|
@ -397,13 +381,30 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) {
|
|||
local, _, _ = net.SplitHostPort(sock.LocalAddr().String())
|
||||
remote, _, _ = net.SplitHostPort(sock.RemoteAddr().String())
|
||||
}
|
||||
localIP := net.ParseIP(local)
|
||||
if localIP = localIP.To16(); localIP != nil {
|
||||
var laddr address.Address
|
||||
var lsubnet address.Subnet
|
||||
copy(laddr[:], localIP)
|
||||
copy(lsubnet[:], localIP)
|
||||
if laddr.IsValid() || lsubnet.IsValid() {
|
||||
// The local address is within the network address/prefix range
|
||||
// This would route ygg over ygg, which we don't want
|
||||
// FIXME ideally this check should happen outside of the core library
|
||||
// Maybe dial/listen at the application level
|
||||
// Then pass a net.Conn to the core library (after these kinds of checks are done)
|
||||
t.links.core.log.Debugln("Dropping ygg-tunneled connection", local, remote)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
force := net.ParseIP(strings.Split(remote, "%")[0]).IsLinkLocalUnicast()
|
||||
link, err := t.link.core.link.create(&stream, name, proto, local, remote, incoming, force, options.linkOptions)
|
||||
link, err := t.links.create(sock, name, proto, local, remote, incoming, force, options.linkOptions)
|
||||
if err != nil {
|
||||
t.link.core.log.Println(err)
|
||||
t.links.core.log.Println(err)
|
||||
panic(err)
|
||||
}
|
||||
t.link.core.log.Debugln("DEBUG: starting handler for", name)
|
||||
err = link.handler()
|
||||
t.link.core.log.Debugln("DEBUG: stopped handler for", name, err)
|
||||
t.links.core.log.Debugln("DEBUG: starting handler for", name)
|
||||
ch, err := link.handler()
|
||||
t.links.core.log.Debugln("DEBUG: stopped handler for", name, err)
|
||||
return ch
|
||||
}
|
|
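Listener configuration in tcp.go now goes through url.Parse and a scheme switch (listenURL) instead of slicing a tcp:// or tls:// prefix off the string. A standalone sketch of that dispatch, with a plain net.Listen standing in for the package's own t.listen and TLS upgrade:

package main

import (
	"fmt"
	"net"
	"net/url"
)

// listenFromString shows the shape of the new flow: parse the configured
// string as a URL, optionally scope it to an interface, then pick a
// transport by scheme. The TLS branch is represented by a plain TCP
// listener here; the real code wraps it with the TLS upgrade.
func listenFromString(listenaddr, sintf string) (net.Listener, error) {
	u, err := url.Parse(listenaddr)
	if err != nil {
		return nil, fmt.Errorf("listener %q is not correctly formatted: %w", listenaddr, err)
	}
	hostport := u.Host
	if sintf != "" {
		// Scope the listener to a specific interface, as listenURL does.
		if host, port, err := net.SplitHostPort(hostport); err == nil {
			hostport = fmt.Sprintf("[%s%%%s]:%s", host, sintf, port)
		}
	}
	switch u.Scheme {
	case "tcp", "tls":
		return net.Listen("tcp", hostport)
	default:
		return nil, fmt.Errorf("listener %q has an unsupported scheme", listenaddr)
	}
}

func main() {
	l, err := listenFromString("tcp://[::1]:0", "")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr())
}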
@ -1,6 +1,6 @@
|
|||
// +build darwin
|
||||
|
||||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"syscall"
|
|
@ -1,6 +1,6 @@
|
|||
// +build linux
|
||||
|
||||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
@ -20,10 +20,10 @@ func (t *tcp) tcpContext(network, address string, c syscall.RawConn) error {
|
|||
|
||||
// Log any errors
|
||||
if bbr != nil {
|
||||
t.link.core.log.Debugln("Failed to set tcp_congestion_control to bbr for socket, SetsockoptString error:", bbr)
|
||||
t.links.core.log.Debugln("Failed to set tcp_congestion_control to bbr for socket, SetsockoptString error:", bbr)
|
||||
}
|
||||
if control != nil {
|
||||
t.link.core.log.Debugln("Failed to set tcp_congestion_control to bbr for socket, Control error:", control)
|
||||
t.links.core.log.Debugln("Failed to set tcp_congestion_control to bbr for socket, Control error:", control)
|
||||
}
|
||||
|
||||
// Return nil because errors here are not considered fatal for the connection, it just means congestion control is suboptimal
|
||||
|
@ -38,7 +38,7 @@ func (t *tcp) getControl(sintf string) func(string, string, syscall.RawConn) err
|
|||
}
|
||||
c.Control(btd)
|
||||
if err != nil {
|
||||
t.link.core.log.Debugln("Failed to set SO_BINDTODEVICE:", sintf)
|
||||
t.links.core.log.Debugln("Failed to set SO_BINDTODEVICE:", sintf)
|
||||
}
|
||||
return t.tcpContext(network, address, c)
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
// +build !darwin,!linux
|
||||
|
||||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"syscall"
|
|
@ -1,4 +1,4 @@
|
|||
package yggdrasil
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
@ -34,7 +34,7 @@ func (t *tcptls) init(tcp *tcp) {
|
|||
}
|
||||
|
||||
edpriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
|
||||
copy(edpriv[:], tcp.link.core.sigPriv[:])
|
||||
copy(edpriv[:], tcp.links.core.secret[:])
|
||||
|
||||
certBuf := &bytes.Buffer{}
|
||||
|
||||
|
@ -42,7 +42,7 @@ func (t *tcptls) init(tcp *tcp) {
|
|||
pubtemp := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{
|
||||
CommonName: hex.EncodeToString(tcp.link.core.sigPub[:]),
|
||||
CommonName: hex.EncodeToString(tcp.links.core.public[:]),
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
|
@ -1,10 +1,10 @@
|
|||
package yggdrasil
|
||||
package core
|
||||
|
||||
// This file contains the version metadata struct
|
||||
// Used in the initial connection setup and key exchange
|
||||
// Some of this could arguably go in wire.go instead
|
||||
|
||||
import "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
import "crypto/ed25519"
|
||||
|
||||
const (
|
||||
ProtocolMajorVersion uint64 = 0 // Major version number of the protocol.
|
||||
|
@ -16,31 +16,27 @@ const (
|
|||
// The current version also includes a minor version number, and the box/sig/link keys that need to be exchanged to open a connection.
|
||||
type version_metadata struct {
|
||||
meta [4]byte
|
||||
ver uint64 // 1 byte in this version
|
||||
ver uint8 // 1 byte in this version
|
||||
// Everything after this point potentially depends on the version number, and is subject to change in future versions
|
||||
minorVer uint64 // 1 byte in this version
|
||||
box crypto.BoxPubKey
|
||||
sig crypto.SigPubKey
|
||||
link crypto.BoxPubKey
|
||||
minorVer uint8 // 1 byte in this version
|
||||
key ed25519.PublicKey
|
||||
}
|
||||
|
||||
// Gets a base metadata with no keys set, but with the correct version numbers.
|
||||
func version_getBaseMetadata() version_metadata {
|
||||
return version_metadata{
|
||||
meta: [4]byte{'m', 'e', 't', 'a'},
|
||||
ver: ProtocolMajorVersion,
|
||||
minorVer: ProtocolMinorVersion,
|
||||
ver: 0,
|
||||
minorVer: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Gest the length of the metadata for this version, used to know how many bytes to read from the start of a connection.
|
||||
// Gets the length of the metadata for this version, used to know how many bytes to read from the start of a connection.
|
||||
func version_getMetaLength() (mlen int) {
|
||||
mlen += 4 // meta
|
||||
mlen += 1 // ver, as long as it's < 127, which it is in this version
|
||||
mlen += 1 // minorVer, as long as it's < 127, which it is in this version
|
||||
mlen += crypto.BoxPubKeyLen // box
|
||||
mlen += crypto.SigPubKeyLen // sig
|
||||
mlen += crypto.BoxPubKeyLen // link
|
||||
mlen++ // ver, as long as it's < 127, which it is in this version
|
||||
mlen++ // minorVer, as long as it's < 127, which it is in this version
|
||||
mlen += ed25519.PublicKeySize // key
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -48,11 +44,9 @@ func version_getMetaLength() (mlen int) {
|
|||
func (m *version_metadata) encode() []byte {
|
||||
bs := make([]byte, 0, version_getMetaLength())
|
||||
bs = append(bs, m.meta[:]...)
|
||||
bs = append(bs, wire_encode_uint64(m.ver)...)
|
||||
bs = append(bs, wire_encode_uint64(m.minorVer)...)
|
||||
bs = append(bs, m.box[:]...)
|
||||
bs = append(bs, m.sig[:]...)
|
||||
bs = append(bs, m.link[:]...)
|
||||
bs = append(bs, m.ver)
|
||||
bs = append(bs, m.minorVer)
|
||||
bs = append(bs, m.key[:]...)
|
||||
if len(bs) != version_getMetaLength() {
|
||||
panic("Inconsistent metadata length")
|
||||
}
|
||||
|
@ -61,20 +55,14 @@ func (m *version_metadata) encode() []byte {
|
|||
|
||||
// Decodes version metadata from its wire format into the struct.
|
||||
func (m *version_metadata) decode(bs []byte) bool {
|
||||
switch {
|
||||
case !wire_chop_slice(m.meta[:], &bs):
|
||||
return false
|
||||
case !wire_chop_uint64(&m.ver, &bs):
|
||||
return false
|
||||
case !wire_chop_uint64(&m.minorVer, &bs):
|
||||
return false
|
||||
case !wire_chop_slice(m.box[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(m.sig[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(m.link[:], &bs):
|
||||
if len(bs) != version_getMetaLength() {
|
||||
return false
|
||||
}
|
||||
offset := 0
|
||||
offset += copy(m.meta[:], bs[offset:])
|
||||
m.ver, offset = bs[offset], offset+1
|
||||
m.minorVer, offset = bs[offset], offset+1
|
||||
m.key = append([]byte(nil), bs[offset:]...)
|
||||
return true
|
||||
}
|
||||
|
|
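With the version.go changes above, the handshake metadata becomes a fixed-length blob: the 4-byte 'meta' magic, one byte each for the major and minor protocol versions, then the 32-byte ed25519 public key, with no varint encoding and no box/sig/link keys. A self-contained sketch of that layout (not the package's actual types; the real code splits the checks between decode() and check()):

package main

import (
	"bytes"
	"crypto/ed25519"
	"fmt"
)

const metaLen = 4 + 1 + 1 + ed25519.PublicKeySize // magic + ver + minorVer + key

// encodeMeta lays out the handshake header exactly as described above.
func encodeMeta(ver, minorVer uint8, key ed25519.PublicKey) []byte {
	bs := make([]byte, 0, metaLen)
	bs = append(bs, 'm', 'e', 't', 'a')
	bs = append(bs, ver, minorVer)
	bs = append(bs, key...)
	return bs
}

// decodeMeta is the inverse; it rejects anything that is not exactly metaLen
// bytes or does not start with the magic.
func decodeMeta(bs []byte) (ver, minorVer uint8, key ed25519.PublicKey, ok bool) {
	if len(bs) != metaLen || !bytes.Equal(bs[:4], []byte("meta")) {
		return 0, 0, nil, false
	}
	return bs[4], bs[5], append(ed25519.PublicKey(nil), bs[6:]...), true
}

func main() {
	pub, _, _ := ed25519.GenerateKey(nil) // nil means crypto/rand
	ver, minor, key, ok := decodeMeta(encodeMeta(0, 4, pub))
	fmt.Println(ver, minor, ok, bytes.Equal(key, pub)) // 0 4 true true
}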
@ -1,305 +0,0 @@
|
|||
// Package crypto is a wrapper around packages under golang.org/x/crypto/, particulaly curve25519, ed25519, and nacl/box.
|
||||
// This is used to avoid explicitly importing and using these packages throughout yggdrasil.
|
||||
// It also includes the all-important NodeID and TreeID types, which are used to identify nodes in the DHT and in the spanning tree's root selection algorithm, respectively.
|
||||
package crypto
|
||||
|
||||
/*
|
||||
|
||||
This part of the package wraps crypto operations needed elsewhere
|
||||
|
||||
In particular, it exposes key generation for ed25519 and nacl box
|
||||
|
||||
It also defines NodeID and TreeID as hashes of keys, and wraps hash functions
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha512"
|
||||
"encoding/hex"
|
||||
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/nacl/box"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NodeID and TreeID
|
||||
|
||||
// NodeIDLen is the length (in bytes) of a NodeID.
|
||||
const NodeIDLen = sha512.Size
|
||||
|
||||
// TreeIDLen is the length (in bytes) of a TreeID.
|
||||
const TreeIDLen = sha512.Size
|
||||
|
||||
// handleLen is the length (in bytes) of a Handle.
|
||||
const handleLen = 8
|
||||
|
||||
// NodeID is how a yggdrasil node is identified in the DHT, and is used to derive IPv6 addresses and subnets in the main executable. It is a sha512sum hash of the node's BoxPubKey
|
||||
type NodeID [NodeIDLen]byte
|
||||
|
||||
// TreeID is how a yggdrasil node is identified in the root selection algorithm used to construct the spanning tree.
|
||||
type TreeID [TreeIDLen]byte
|
||||
|
||||
type Handle [handleLen]byte
|
||||
|
||||
func (n *NodeID) String() string {
|
||||
return hex.EncodeToString(n[:])
|
||||
}
|
||||
|
||||
// Network returns "nodeid" nearly always right now.
|
||||
func (n *NodeID) Network() string {
|
||||
return "nodeid"
|
||||
}
|
||||
|
||||
// PrefixLength returns the number of bits set in a masked NodeID.
|
||||
func (n *NodeID) PrefixLength() int {
|
||||
var len int
|
||||
for i, v := range *n {
|
||||
_, _ = i, v
|
||||
if v == 0xff {
|
||||
len += 8
|
||||
continue
|
||||
}
|
||||
for v&0x80 != 0 {
|
||||
len++
|
||||
v <<= 1
|
||||
}
|
||||
if v != 0 {
|
||||
return -1
|
||||
}
|
||||
for i++; i < NodeIDLen; i++ {
|
||||
if n[i] != 0 {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
return len
|
||||
}
|
||||
|
||||
// GetNodeID returns the NodeID associated with a BoxPubKey.
|
||||
func GetNodeID(pub *BoxPubKey) *NodeID {
|
||||
h := sha512.Sum512(pub[:])
|
||||
return (*NodeID)(&h)
|
||||
}
|
||||
|
||||
// GetTreeID returns the TreeID associated with a BoxPubKey
|
||||
func GetTreeID(pub *SigPubKey) *TreeID {
|
||||
h := sha512.Sum512(pub[:])
|
||||
return (*TreeID)(&h)
|
||||
}
|
||||
|
||||
// NewHandle returns a new (cryptographically random) Handle, used by the session code to identify which session an incoming packet is associated with.
|
||||
func NewHandle() *Handle {
|
||||
var h Handle
|
||||
_, err := rand.Read(h[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &h
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Signatures
|
||||
|
||||
// SigPubKeyLen is the length of a SigPubKey in bytes.
|
||||
const SigPubKeyLen = ed25519.PublicKeySize
|
||||
|
||||
// SigPrivKeyLen is the length of a SigPrivKey in bytes.
|
||||
const SigPrivKeyLen = ed25519.PrivateKeySize
|
||||
|
||||
// SigLen is the length of SigBytes.
|
||||
const SigLen = ed25519.SignatureSize
|
||||
|
||||
// SigPubKey is a public ed25519 signing key.
|
||||
type SigPubKey [SigPubKeyLen]byte
|
||||
|
||||
// SigPrivKey is a private ed25519 signing key.
|
||||
type SigPrivKey [SigPrivKeyLen]byte
|
||||
|
||||
// SigBytes is an ed25519 signature.
|
||||
type SigBytes [SigLen]byte
|
||||
|
||||
// NewSigKeys generates a public/private ed25519 key pair.
|
||||
func NewSigKeys() (*SigPubKey, *SigPrivKey) {
|
||||
var pub SigPubKey
|
||||
var priv SigPrivKey
|
||||
pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
copy(pub[:], pubSlice)
|
||||
copy(priv[:], privSlice)
|
||||
return &pub, &priv
|
||||
}
|
||||
|
||||
// Sign returns the SigBytes signing a message.
|
||||
func Sign(priv *SigPrivKey, msg []byte) *SigBytes {
|
||||
var sig SigBytes
|
||||
sigSlice := ed25519.Sign(priv[:], msg)
|
||||
copy(sig[:], sigSlice)
|
||||
return &sig
|
||||
}
|
||||
|
||||
// Verify returns true if the provided signature matches the key and message.
|
||||
func Verify(pub *SigPubKey, msg []byte, sig *SigBytes) bool {
|
||||
// Should sig be an array instead of a slice?...
|
||||
// It's fixed size, but
|
||||
return ed25519.Verify(pub[:], msg, sig[:])
|
||||
}
|
||||
|
||||
// Public returns the SigPubKey associated with this SigPrivKey.
|
||||
func (p SigPrivKey) Public() SigPubKey {
|
||||
priv := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
|
||||
copy(priv[:], p[:])
|
||||
pub := priv.Public().(ed25519.PublicKey)
|
||||
var sigPub SigPubKey
|
||||
copy(sigPub[:], pub[:])
|
||||
return sigPub
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NaCl-like crypto "box" (curve25519+xsalsa20+poly1305)
|
||||
|
||||
// BoxPubKeyLen is the length of a BoxPubKey in bytes.
|
||||
const BoxPubKeyLen = 32
|
||||
|
||||
// BoxPrivKeyLen is the length of a BoxPrivKey in bytes.
|
||||
const BoxPrivKeyLen = 32
|
||||
|
||||
// BoxSharedKeyLen is the length of a BoxSharedKey in bytes.
|
||||
const BoxSharedKeyLen = 32
|
||||
|
||||
// BoxNonceLen is the length of a BoxNonce in bytes.
|
||||
const BoxNonceLen = 24
|
||||
|
||||
// BoxOverhead is the length of the overhead from boxing something.
|
||||
const BoxOverhead = box.Overhead
|
||||
|
||||
// BoxPubKey is a NaCl-like "box" public key (curve25519+xsalsa20+poly1305).
|
||||
type BoxPubKey [BoxPubKeyLen]byte
|
||||
|
||||
// BoxPrivKey is a NaCl-like "box" private key (curve25519+xsalsa20+poly1305).
|
||||
type BoxPrivKey [BoxPrivKeyLen]byte
|
||||
|
||||
// BoxSharedKey is a NaCl-like "box" shared key (curve25519+xsalsa20+poly1305).
|
||||
type BoxSharedKey [BoxSharedKeyLen]byte
|
||||
|
||||
// BoxNonce is the nonce used in NaCl-like crypto "box" operations (curve25519+xsalsa20+poly1305), and must not be reused for different messages encrypted using the same BoxSharedKey.
|
||||
type BoxNonce [BoxNonceLen]byte
|
||||
|
||||
// String returns a string representation of the "box" key.
|
||||
func (k BoxPubKey) String() string {
|
||||
return hex.EncodeToString(k[:])
|
||||
}
|
||||
|
||||
// Network returns "curve25519" for "box" keys.
|
||||
func (n BoxPubKey) Network() string {
|
||||
return "curve25519"
|
||||
}
|
||||
|
||||
// NewBoxKeys generates a new pair of public/private crypto box keys.
|
||||
func NewBoxKeys() (*BoxPubKey, *BoxPrivKey) {
|
||||
pubBytes, privBytes, err := box.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pub := (*BoxPubKey)(pubBytes)
|
||||
priv := (*BoxPrivKey)(privBytes)
|
||||
return pub, priv
|
||||
}
|
||||
|
||||
// GetSharedKey returns the shared key derived from your private key and the destination's public key.
|
||||
func GetSharedKey(myPrivKey *BoxPrivKey,
|
||||
othersPubKey *BoxPubKey) *BoxSharedKey {
|
||||
var shared [BoxSharedKeyLen]byte
|
||||
priv := (*[BoxPrivKeyLen]byte)(myPrivKey)
|
||||
pub := (*[BoxPubKeyLen]byte)(othersPubKey)
|
||||
box.Precompute(&shared, pub, priv)
|
||||
return (*BoxSharedKey)(&shared)
|
||||
}
|
||||
|
||||
// BoxOpen returns a message and true if it successfully opens a crypto box using the provided shared key and nonce.
|
||||
func BoxOpen(shared *BoxSharedKey,
|
||||
boxed []byte,
|
||||
nonce *BoxNonce) ([]byte, bool) {
|
||||
out := util.GetBytes()
|
||||
s := (*[BoxSharedKeyLen]byte)(shared)
|
||||
n := (*[BoxNonceLen]byte)(nonce)
|
||||
unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s)
|
||||
return unboxed, success
|
||||
}
|
||||
|
||||
// BoxSeal seals a crypto box using the provided shared key, returning the box and the nonce needed to decrypt it.
|
||||
// If nonce is nil, a random BoxNonce will be used and returned.
|
||||
// If nonce is non-nil, then nonce.Increment() will be called before using it, and the incremented BoxNonce is what is returned.
|
||||
func BoxSeal(shared *BoxSharedKey, unboxed []byte, nonce *BoxNonce) ([]byte, *BoxNonce) {
|
||||
if nonce == nil {
|
||||
nonce = NewBoxNonce()
|
||||
}
|
||||
nonce.Increment()
|
||||
out := util.GetBytes()
|
||||
s := (*[BoxSharedKeyLen]byte)(shared)
|
||||
n := (*[BoxNonceLen]byte)(nonce)
|
||||
boxed := box.SealAfterPrecomputation(out, unboxed, n, s)
|
||||
return boxed, nonce
|
||||
}
|
||||
|
||||
// NewBoxNonce generates a (cryptographically) random BoxNonce.
|
||||
func NewBoxNonce() *BoxNonce {
|
||||
var nonce BoxNonce
|
||||
_, err := rand.Read(nonce[:])
|
||||
for ; err == nil && nonce[0] == 0xff; _, err = rand.Read(nonce[:]) {
|
||||
// Make sure nonce isn't too high
|
||||
// This is just to make rollover unlikely to happen
|
||||
// Rollover is fine, but it may kill the session and force it to reopen
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &nonce
|
||||
}
|
||||
|
||||
// Increment adds 2 to a BoxNonce, which is useful if one node intends to send only with odd BoxNonce values, and the other only with even BoxNonce values.
|
||||
func (n *BoxNonce) Increment() {
|
||||
oldNonce := *n
|
||||
n[len(n)-1] += 2
|
||||
for i := len(n) - 2; i >= 0; i-- {
|
||||
if n[i+1] < oldNonce[i+1] {
|
||||
n[i] += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Public returns the BoxPubKey associated with this BoxPrivKey.
|
||||
func (p BoxPrivKey) Public() BoxPubKey {
|
||||
var boxPub [BoxPubKeyLen]byte
|
||||
var boxPriv [BoxPrivKeyLen]byte
|
||||
copy(boxPriv[:BoxPrivKeyLen], p[:BoxPrivKeyLen])
|
||||
curve25519.ScalarBaseMult(&boxPub, &boxPriv)
|
||||
return boxPub
|
||||
}
|
||||
|
||||
// Minus is the result of subtracting the provided BoNonce from this BoxNonce, bounded at +- 64.
|
||||
// It's primarily used to determine if a new BoxNonce is higher than the last known BoxNonce from a crypto session, and by how much.
|
||||
// This is used in the machinery that makes sure replayed packets can't keep a session open indefinitely or stuck using old/bad information about a node.
|
||||
func (n *BoxNonce) Minus(m *BoxNonce) int64 {
|
||||
diff := int64(0)
|
||||
for idx := range n {
|
||||
diff *= 256
|
||||
diff += int64(n[idx]) - int64(m[idx])
|
||||
if diff > 64 {
|
||||
diff = 64
|
||||
}
|
||||
if diff < -64 {
|
||||
diff = -64
|
||||
}
|
||||
}
|
||||
return diff
|
||||
}
|
|
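With src/crypto removed above, key handling relies on the standard crypto/ed25519 package, and node addresses are derived directly from the ed25519 public key (link.go uses address.AddrForKey for this when logging a connected peer). A small sketch of that flow; the AddrForKey call mirrors its use in link.go, everything else is illustrative:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"net"

	"github.com/yggdrasil-network/yggdrasil-go/src/address"
)

func main() {
	// Generate an ed25519 keypair with the standard library, replacing the
	// old crypto.NewSigKeys / NewBoxKeys wrappers.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Derive the node's overlay IPv6 address from the public key, as the
	// link handler does for a freshly connected peer.
	addr := address.AddrForKey(pub)
	fmt.Println("public key:", hex.EncodeToString(pub))
	fmt.Println("address:   ", net.IP(addr[:]).String())
	_ = priv // the private key would normally live in the node config
}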
@ -1,7 +1,5 @@
package defaults

import "github.com/yggdrasil-network/yggdrasil-go/src/types"

// Defines which parameters are expected by default for configuration on a
// specific platform. These values are populated in the relevant defaults_*.go
// for the platform being targeted. They must be set.

@ -17,7 +15,7 @@ type platformDefaultParameters struct {
	DefaultMulticastDNSInterfaces []string

	// TUN/TAP
	MaximumIfMTU types.MTU
	DefaultIfMTU types.MTU
	MaximumIfMTU uint64
	DefaultIfMTU uint64
	DefaultIfName string
}

@ -10,7 +10,7 @@ func GetDefaults() platformDefaultParameters {
		DefaultAdminListen: "unix:///var/run/yggdrasil.sock",

		// Configuration (used for yggdrasilctl)
		DefaultConfigFile: "/etc/yggdrasil.conf",
		DefaultConfigFile: "/usr/local/etc/yggdrasil.conf",

		// Multicast interfaces
		DefaultMulticastInterfaces: []string{

@ -23,6 +23,6 @@ func GetDefaults() platformDefaultParameters {
		// TUN/TAP
		MaximumIfMTU:  16384,
		DefaultIfMTU:  16384,
		DefaultIfName: "/dev/tun0",
		DefaultIfName: "tun0",
	}
}
@ -5,16 +5,15 @@ import (

	"github.com/yggdrasil-network/yggdrasil-go/src/admin"
	"github.com/yggdrasil-network/yggdrasil-go/src/config"
	"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// Module is an interface that defines which functions must be supported by a
// given Yggdrasil module.
type Module interface {
	Init(core *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) error
	Init(core *core.Core, state *config.NodeState, log *log.Logger, options interface{}) error
	Start() error
	Stop() error
	UpdateConfig(config *config.NodeConfig)
	SetupAdminHandlers(a *admin.AdminSocket)
	IsStarted() bool
}
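Modules now receive a *core.Core in Init instead of *yggdrasil.Core. A skeleton implementation of the interface above; the type and field names are illustrative only:

package example

import (
	"github.com/gologme/log"

	"github.com/yggdrasil-network/yggdrasil-go/src/admin"
	"github.com/yggdrasil-network/yggdrasil-go/src/config"
	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

// NoopModule satisfies the Module interface without doing any work; a real
// module would start its goroutines in Start and tear them down in Stop.
type NoopModule struct {
	core    *core.Core
	state   *config.NodeState
	log     *log.Logger
	started bool
}

func (m *NoopModule) Init(c *core.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
	m.core, m.state, m.log = c, state, log
	return nil
}

func (m *NoopModule) Start() error                            { m.started = true; return nil }
func (m *NoopModule) Stop() error                             { m.started = false; return nil }
func (m *NoopModule) UpdateConfig(cfg *config.NodeConfig)     {}
func (m *NoopModule) SetupAdminHandlers(a *admin.AdminSocket) {}
func (m *NoopModule) IsStarted() bool                         { return m.started }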
@ -1,13 +1,34 @@
package multicast

import "github.com/yggdrasil-network/yggdrasil-go/src/admin"
import (
	"encoding/json"

	"github.com/yggdrasil-network/yggdrasil-go/src/admin"
)

type GetMulticastInterfacesRequest struct{}
type GetMulticastInterfacesResponse struct {
	Interfaces []string `json:"multicast_interfaces"`
}

func (m *Multicast) getMulticastInterfacesHandler(req *GetMulticastInterfacesRequest, res *GetMulticastInterfacesResponse) error {
	res.Interfaces = []string{}
	for _, v := range m.Interfaces() {
		res.Interfaces = append(res.Interfaces, v.Name)
	}
	return nil
}

func (m *Multicast) SetupAdminHandlers(a *admin.AdminSocket) {
	a.AddHandler("getMulticastInterfaces", []string{}, func(in admin.Info) (admin.Info, error) {
		var intfs []string
		for _, v := range m.Interfaces() {
			intfs = append(intfs, v.Name)
	_ = a.AddHandler("getMulticastInterfaces", []string{}, func(in json.RawMessage) (interface{}, error) {
		req := &GetMulticastInterfacesRequest{}
		res := &GetMulticastInterfacesResponse{}
		if err := json.Unmarshal(in, &req); err != nil {
			return nil, err
		}
		return admin.Info{"multicast_interfaces": intfs}, nil
		if err := m.getMulticastInterfacesHandler(req, res); err != nil {
			return nil, err
		}
		return res, nil
	})
}
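Admin handlers now unmarshal a json.RawMessage request and return a typed response struct that is marshalled for them, instead of passing admin.Info maps around. Registering a handler against that newer signature might look like this sketch; the handler name and payload are made up for illustration:

package example

import (
	"encoding/json"

	"github.com/yggdrasil-network/yggdrasil-go/src/admin"
)

type pingRequest struct{}
type pingResponse struct {
	Reply string `json:"reply"`
}

// setupExampleHandlers mirrors the pattern used by the multicast module:
// decode the raw request, fill in a typed response, return it for encoding.
func setupExampleHandlers(a *admin.AdminSocket) {
	_ = a.AddHandler("ping", []string{}, func(in json.RawMessage) (interface{}, error) {
		req := &pingRequest{}
		if err := json.Unmarshal(in, &req); err != nil {
			return nil, err
		}
		return &pingResponse{Reply: "pong"}, nil
	})
}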
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
|
@ -11,61 +12,49 @@ import (
|
|||
"github.com/gologme/log"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
const (
|
||||
// GroupAddr contains the multicast group and port used for multicast packets.
|
||||
GroupAddr = "[ff02::114]:9001"
|
||||
)
|
||||
|
||||
// Multicast represents the multicast advertisement and discovery mechanism used
|
||||
// by Yggdrasil to find peers on the same subnet. When a beacon is received on a
|
||||
// configured multicast interface, Yggdrasil will attempt to peer with that node
|
||||
// automatically.
|
||||
type Multicast struct {
|
||||
phony.Inbox
|
||||
core *yggdrasil.Core
|
||||
core *core.Core
|
||||
config *config.NodeState
|
||||
log *log.Logger
|
||||
sock *ipv6.PacketConn
|
||||
groupAddr *net.UDPAddr
|
||||
listeners map[string]*multicastInterface
|
||||
groupAddr string
|
||||
listeners map[string]*listenerInfo
|
||||
listenPort uint16
|
||||
isOpen bool
|
||||
monitor *time.Timer
|
||||
platformhandler *time.Timer
|
||||
_interfaces map[string]net.Interface
|
||||
_interfaceAddrs map[string]addrInfo
|
||||
_interfaces map[string]interfaceInfo
|
||||
}
|
||||
|
||||
type addrInfo struct {
|
||||
type interfaceInfo struct {
|
||||
iface net.Interface
|
||||
addrs []net.Addr
|
||||
time time.Time
|
||||
}
|
||||
|
||||
type multicastInterface struct {
|
||||
phony.Inbox
|
||||
sock *ipv6.PacketConn
|
||||
destAddr net.UDPAddr
|
||||
listener *yggdrasil.TcpListener
|
||||
zone string
|
||||
timer *time.Timer
|
||||
type listenerInfo struct {
|
||||
listener *core.TcpListener
|
||||
time time.Time
|
||||
interval time.Duration
|
||||
stop chan interface{}
|
||||
}
|
||||
|
||||
// Init prepares the multicast interface for use.
|
||||
func (m *Multicast) Init(core *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) (err error) {
|
||||
func (m *Multicast) Init(core *core.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
|
||||
m.core = core
|
||||
m.config = state
|
||||
m.log = log
|
||||
m.listeners = make(map[string]*multicastInterface)
|
||||
m.listeners = make(map[string]*listenerInfo)
|
||||
m._interfaces = make(map[string]interfaceInfo)
|
||||
current := m.config.GetCurrent()
|
||||
m.listenPort = current.LinkLocalTCPPort
|
||||
m.groupAddr, err = net.ResolveUDPAddr("udp6", GroupAddr)
|
||||
return
|
||||
m.groupAddr = "[ff02::114]:9001"
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts the multicast interface. This launches goroutines which will
|
||||
|
@ -88,7 +77,7 @@ func (m *Multicast) _start() error {
|
|||
return nil
|
||||
}
|
||||
m.log.Infoln("Starting multicast module")
|
||||
addr, err := net.ResolveUDPAddr("udp", GroupAddr)
|
||||
addr, err := net.ResolveUDPAddr("udp", m.groupAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -108,7 +97,7 @@ func (m *Multicast) _start() error {
|
|||
m.isOpen = true
|
||||
go m.listen()
|
||||
m.Act(nil, m._multicastStarted)
|
||||
m.Act(nil, m._monitorInterfaceChanges)
|
||||
m.Act(nil, m._announce)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -135,131 +124,41 @@ func (m *Multicast) Stop() error {
|
|||
func (m *Multicast) _stop() error {
|
||||
m.log.Infoln("Stopping multicast module")
|
||||
m.isOpen = false
|
||||
for name := range m.listeners {
|
||||
close(m.listeners[name].stop)
|
||||
delete(m.listeners, name)
|
||||
}
|
||||
if m.platformhandler != nil {
|
||||
m.platformhandler.Stop()
|
||||
}
|
||||
if m.sock != nil {
|
||||
m.sock.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateConfig updates the multicast module with the provided config.NodeConfig
|
||||
// and then signals the various module goroutines to reconfigure themselves if
|
||||
// needed.
|
||||
func (m *Multicast) UpdateConfig(config *config.NodeConfig) {
|
||||
m.Act(nil, func() { m._updateConfig(config) })
|
||||
}
|
||||
|
||||
func (m *Multicast) _updateConfig(config *config.NodeConfig) {
|
||||
m.log.Infoln("Reloading multicast configuration...")
|
||||
if m.isOpen {
|
||||
if len(config.MulticastInterfaces) == 0 || config.LinkLocalTCPPort != m.listenPort {
|
||||
if err := m._stop(); err != nil {
|
||||
m.log.Errorln("Error stopping multicast module:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
m.config.Replace(*config)
|
||||
m.listenPort = config.LinkLocalTCPPort
|
||||
if !m.isOpen && len(config.MulticastInterfaces) > 0 {
|
||||
if err := m._start(); err != nil {
|
||||
m.log.Errorln("Error starting multicast module:", err)
|
||||
}
|
||||
}
|
||||
m.log.Debugln("Reloaded multicast configuration successfully")
|
||||
}
|
||||
|
||||
func (m *Multicast) _monitorInterfaceChanges() {
|
||||
m._updateInterfaces() // update interfaces and interfaceAddrs
|
||||
|
||||
// Look for interfaces we don't know about yet.
|
||||
for name, intf := range m._interfaces {
|
||||
if _, ok := m.listeners[name]; !ok {
|
||||
// Look up interface addresses.
|
||||
addrs := m._interfaceAddrs[intf.Name].addrs
|
||||
// Find the first link-local address.
|
||||
for _, addr := range addrs {
|
||||
addrIP, _, _ := net.ParseCIDR(addr.String())
|
||||
// Join the multicast group.
|
||||
m.sock.JoinGroup(&intf, m.groupAddr)
|
||||
// Construct a listener on this address.
|
||||
listenaddr := fmt.Sprintf("[%s%%%s]:%d", addrIP, intf.Name, m.listenPort)
|
||||
listener, err := m.core.ListenTCP(listenaddr)
|
||||
func (m *Multicast) _updateInterfaces() {
|
||||
interfaces := make(map[string]interfaceInfo)
|
||||
intfs := m.getAllowedInterfaces()
|
||||
for _, intf := range intfs {
|
||||
addrs, err := intf.Addrs()
|
||||
if err != nil {
|
||||
m.log.Warnln("Not multicasting on", name, "due to error:", err)
|
||||
m.log.Warnf("Failed up get addresses for interface %s: %s", intf.Name, err)
|
||||
continue
|
||||
}
|
||||
// This is a new interface. Start an announcer for it.
|
||||
multicastInterface := &multicastInterface{
|
||||
sock: m.sock,
|
||||
destAddr: *m.groupAddr,
|
||||
listener: listener,
|
||||
stop: make(chan interface{}),
|
||||
zone: name,
|
||||
}
|
||||
multicastInterface.Act(m, multicastInterface._announce)
|
||||
m.listeners[name] = multicastInterface
|
||||
m.log.Debugln("Started multicasting on", name)
|
||||
break
|
||||
interfaces[intf.Name] = interfaceInfo{
|
||||
iface: intf,
|
||||
addrs: addrs,
|
||||
}
|
||||
}
|
||||
}
|
||||
// Look for interfaces we knew about but are no longer there.
|
||||
for name, intf := range m.listeners {
|
||||
if _, ok := m._interfaces[name]; !ok {
|
||||
// This is a disappeared interface. Stop the announcer.
|
||||
close(intf.stop)
|
||||
delete(m.listeners, name)
|
||||
m.log.Debugln("Stopped multicasting on", name)
|
||||
}
|
||||
}
|
||||
// Queue the next check.
|
||||
m.monitor = time.AfterFunc(time.Second, func() {
|
||||
m.Act(nil, m._monitorInterfaceChanges)
|
||||
})
|
||||
m._interfaces = interfaces
|
||||
}
|
||||
|
||||
func (m *multicastInterface) _announce() {
|
||||
// Check if the multicast interface has been stopped. This will happen
|
||||
// if it disappears from the system or goes down.
|
||||
select {
|
||||
case <-m.stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
// Send the beacon.
|
||||
lladdr := m.listener.Listener.Addr().String()
|
||||
if a, err := net.ResolveTCPAddr("tcp6", lladdr); err == nil {
|
||||
a.Zone = ""
|
||||
msg := []byte(a.String())
|
||||
m.sock.WriteTo(msg, nil, &m.destAddr)
|
||||
}
|
||||
// Queue the next beacon.
|
||||
if m.interval.Seconds() < 15 {
|
||||
m.interval += time.Second
|
||||
}
|
||||
m.timer = time.AfterFunc(m.interval, func() {
|
||||
m.Act(nil, m._announce)
|
||||
})
|
||||
}
|
||||
|
||||
// GetInterfaces returns the currently known/enabled multicast interfaces. It is
|
||||
// expected that UpdateInterfaces has been called at least once before calling
|
||||
// this method.
|
||||
func (m *Multicast) Interfaces() map[string]net.Interface {
|
||||
var interfaces map[string]net.Interface
|
||||
interfaces := make(map[string]net.Interface)
|
||||
phony.Block(m, func() {
|
||||
interfaces = m._interfaces
|
||||
for _, info := range m._interfaces {
|
||||
interfaces[info.iface.Name] = info.iface
|
||||
}
|
||||
})
|
||||
return interfaces
|
||||
}
|
||||
|
||||
func (m *Multicast) _updateInterfaces() {
|
||||
// getAllowedInterfaces returns the currently known/enabled multicast interfaces.
|
||||
func (m *Multicast) getAllowedInterfaces() map[string]net.Interface {
|
||||
interfaces := make(map[string]net.Interface)
|
||||
// Get interface expressions from config
|
||||
current := m.config.GetCurrent()
|
||||
|
@ -267,10 +166,11 @@ func (m *Multicast) _updateInterfaces() {
|
|||
// Ask the system for network interfaces
|
||||
allifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
// Don't panic, since this may be from e.g. too many open files (from too much connection spam)
|
||||
// TODO? log something
|
||||
return nil
|
||||
}
|
||||
// Work out which interfaces to announce on
|
||||
interfaceAddrs := make(map[string]addrInfo)
|
||||
for _, iface := range allifaces {
|
||||
if iface.Flags&net.FlagUp == 0 {
|
||||
// Ignore interfaces that are down
|
||||
|
@ -284,26 +184,6 @@ func (m *Multicast) _updateInterfaces() {
|
|||
// Ignore point-to-point interfaces
|
||||
continue
|
||||
}
|
||||
var aInfo addrInfo
|
||||
var isIn bool
|
||||
if aInfo, isIn = m._interfaceAddrs[iface.Name]; isIn && time.Since(aInfo.time) < time.Minute {
|
||||
// don't call iface.Addrs, it's unlikely things have changed
|
||||
} else {
|
||||
aInfo.addrs, _ = iface.Addrs()
|
||||
aInfo.time = time.Now()
|
||||
}
|
||||
lladdrs := aInfo.addrs[:0]
|
||||
for _, addr := range aInfo.addrs {
|
||||
addrIP, _, _ := net.ParseCIDR(addr.String())
|
||||
if addrIP.To4() == nil && addrIP.IsLinkLocalUnicast() {
|
||||
lladdrs = append(lladdrs, addr)
|
||||
}
|
||||
}
|
||||
aInfo.addrs = lladdrs
|
||||
if len(lladdrs) == 0 {
|
||||
// Ignore interfaces without link-local addresses
|
||||
continue
|
||||
}
|
||||
for _, expr := range exprs {
|
||||
// Compile each regular expression
|
||||
e, err := regexp.Compile(expr)
|
||||
|
@ -313,16 +193,132 @@ func (m *Multicast) _updateInterfaces() {
|
|||
// Does the interface match the regular expression? Store it if so
|
||||
if e.MatchString(iface.Name) {
|
||||
interfaces[iface.Name] = iface
|
||||
interfaceAddrs[iface.Name] = aInfo
|
||||
}
|
||||
}
|
||||
}
|
||||
m._interfaces = interfaces
|
||||
m._interfaceAddrs = interfaceAddrs
|
||||
return interfaces
|
||||
}
|
||||
|
||||
func (m *Multicast) _announce() {
|
||||
if !m.isOpen {
|
||||
return
|
||||
}
|
||||
m._updateInterfaces()
|
||||
groupAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
destAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// There might be interfaces that we configured listeners for but are no
|
||||
// longer up - if that's the case then we should stop the listeners
|
||||
for name, info := range m.listeners {
|
||||
// Prepare our stop function!
|
||||
stop := func() {
|
||||
info.listener.Stop()
|
||||
delete(m.listeners, name)
|
||||
m.log.Debugln("No longer multicasting on", name)
|
||||
}
|
||||
// If the interface is no longer visible on the system then stop the
|
||||
// listener, as another one will be started further down
|
||||
if _, ok := m._interfaces[name]; !ok {
|
||||
stop()
|
||||
continue
|
||||
}
|
||||
// It's possible that the link-local listener address has changed so if
|
||||
// that is the case then we should clean up the interface listener
|
||||
found := false
|
||||
listenaddr, err := net.ResolveTCPAddr("tcp6", info.listener.Listener.Addr().String())
|
||||
if err != nil {
|
||||
stop()
|
||||
continue
|
||||
}
|
||||
// Find the interface that matches the listener
|
||||
if info, ok := m._interfaces[name]; ok {
|
||||
for _, addr := range info.addrs {
|
||||
if ip, _, err := net.ParseCIDR(addr.String()); err == nil {
|
||||
// Does the interface address match our listener address?
|
||||
if ip.Equal(listenaddr.IP) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the address has not been found on the adapter then we should stop
|
||||
// and clean up the TCP listener. A new one will be created below if a
|
||||
// suitable link-local address is found
|
||||
if !found {
|
||||
stop()
|
||||
}
|
||||
}
|
||||
// Now that we have a list of valid interfaces from the operating system,
|
||||
// we can start checking if we can send multicasts on them
|
||||
for _, info := range m._interfaces {
|
||||
iface := info.iface
|
||||
for _, addr := range info.addrs {
|
||||
addrIP, _, _ := net.ParseCIDR(addr.String())
|
||||
// Ignore IPv4 addresses
|
||||
if addrIP.To4() != nil {
|
||||
continue
|
||||
}
|
||||
// Ignore non-link-local addresses
|
||||
if !addrIP.IsLinkLocalUnicast() {
|
||||
continue
|
||||
}
|
||||
// Join the multicast group
|
||||
m.sock.JoinGroup(&iface, groupAddr)
|
||||
// Try and see if we already have a TCP listener for this interface
|
||||
var info *listenerInfo
|
||||
if nfo, ok := m.listeners[iface.Name]; !ok || nfo.listener.Listener == nil {
|
||||
// No listener was found - let's create one
|
||||
urlString := fmt.Sprintf("tcp://[%s]:%d", addrIP, m.listenPort)
|
||||
u, err := url.Parse(urlString)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if li, err := m.core.Listen(u, iface.Name); err == nil {
|
||||
m.log.Debugln("Started multicasting on", iface.Name)
|
||||
// Store the listener so that we can stop it later if needed
|
||||
info = &listenerInfo{listener: li, time: time.Now()}
|
||||
m.listeners[iface.Name] = info
|
||||
} else {
|
||||
m.log.Warnln("Not multicasting on", iface.Name, "due to error:", err)
|
||||
}
|
||||
} else {
|
||||
// An existing listener was found
|
||||
info = m.listeners[iface.Name]
|
||||
}
|
||||
// Make sure nothing above failed for some reason
|
||||
if info == nil {
|
||||
continue
|
||||
}
|
||||
if time.Since(info.time) < info.interval {
|
||||
continue
|
||||
}
|
||||
// Get the listener details and construct the multicast beacon
|
||||
lladdr := info.listener.Listener.Addr().String()
|
||||
if a, err := net.ResolveTCPAddr("tcp6", lladdr); err == nil {
|
||||
a.Zone = ""
|
||||
destAddr.Zone = iface.Name
|
||||
msg := []byte(a.String())
|
||||
m.sock.WriteTo(msg, nil, destAddr)
|
||||
}
|
||||
if info.interval.Seconds() < 15 {
|
||||
info.interval += time.Second
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
time.AfterFunc(time.Second, func() {
|
||||
m.Act(nil, m._announce)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Multicast) listen() {
|
||||
groupAddr, err := net.ResolveUDPAddr("udp6", GroupAddr)
|
||||
groupAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -355,10 +351,17 @@ func (m *Multicast) listen() {
|
|||
if addr.IP.String() != from.IP.String() {
|
||||
continue
|
||||
}
|
||||
// Note that m.Interfaces would block if it was being run by the actor itself
|
||||
if _, ok := m.Interfaces()[from.Zone]; ok {
|
||||
var interfaces map[string]interfaceInfo
|
||||
phony.Block(m, func() {
|
||||
interfaces = m._interfaces
|
||||
})
|
||||
if _, ok := interfaces[from.Zone]; ok {
|
||||
addr.Zone = ""
|
||||
if err := m.core.CallPeer("tcp://"+addr.String(), from.Zone); err != nil {
|
||||
u, err := url.Parse("tcp://" + addr.String())
|
||||
if err != nil {
|
||||
m.log.Debugln("Call from multicast failed, parse error:", addr.String(), err)
|
||||
}
|
||||
if err := m.core.CallPeer(u, from.Zone); err != nil {
|
||||
m.log.Debugln("Call from multicast failed:", err)
|
||||
}
|
||||
}
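
The change above replaces the m.Interfaces() call with phony.Block so the listener goroutine can read the actor-owned interface map without deadlocking. A minimal, self-contained sketch of that pattern follows; the type and field names are illustrative, and it copies the map entries rather than sharing the reference, which is a slightly more defensive variant than the diff itself uses.

// Sketch: synchronously snapshotting actor-owned state from outside the actor.
package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

type interfaceInfo struct{ name string }

type Multicast struct {
	phony.Inbox
	_interfaces map[string]interfaceInfo // only touched from the actor's own goroutine
}

func (m *Multicast) snapshotInterfaces() map[string]interfaceInfo {
	var out map[string]interfaceInfo
	phony.Block(m, func() {
		// This closure runs on the actor, so reading m._interfaces here is safe.
		out = make(map[string]interfaceInfo, len(m._interfaces))
		for k, v := range m._interfaces {
			out[k] = v
		}
	})
	return out
}

func main() {
	m := &Multicast{_interfaces: map[string]interfaceInfo{"eth0": {name: "eth0"}}}
	fmt.Println(m.snapshotInterfaces())
}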
|
||||
|
|
|
@ -32,6 +32,9 @@ import (
|
|||
var awdlGoroutineStarted bool
|
||||
|
||||
func (m *Multicast) _multicastStarted() {
|
||||
if !m.isOpen {
|
||||
return
|
||||
}
|
||||
C.StopAWDLBrowsing()
|
||||
for intf := range m._interfaces {
|
||||
if intf == "awdl0" {
|
||||
|
@ -39,7 +42,7 @@ func (m *Multicast) _multicastStarted() {
|
|||
break
|
||||
}
|
||||
}
|
||||
m.platformhandler = time.AfterFunc(time.Minute, func() {
|
||||
time.AfterFunc(time.Minute, func() {
|
||||
m.Act(nil, m._multicastStarted)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,118 +1,41 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
|
||||
)
|
||||
|
||||
func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
|
||||
a.AddHandler("getTunTap", []string{}, func(in admin.Info) (r admin.Info, e error) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
r = admin.Info{"none": admin.Info{}}
|
||||
e = nil
|
||||
}
|
||||
}()
|
||||
type GetTUNRequest struct{}
|
||||
type GetTUNResponse map[string]TUNEntry
|
||||
|
||||
return admin.Info{
|
||||
t.Name(): admin.Info{
|
||||
"mtu": t.mtu,
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
/*
|
||||
// TODO: rewrite this as I'm fairly sure it doesn't work right on many
|
||||
// platforms anyway, but it may require changes to Water
|
||||
a.AddHandler("setTunTap", []string{"name", "[tap_mode]", "[mtu]"}, func(in Info) (Info, error) {
|
||||
// Set sane defaults
|
||||
iftapmode := defaults.GetDefaults().DefaultIfTAPMode
|
||||
ifmtu := defaults.GetDefaults().DefaultIfMTU
|
||||
// Has TAP mode been specified?
|
||||
if tap, ok := in["tap_mode"]; ok {
|
||||
iftapmode = tap.(bool)
|
||||
}
|
||||
// Check we have enough params for MTU
|
||||
if mtu, ok := in["mtu"]; ok {
|
||||
if mtu.(float64) >= 1280 && ifmtu <= defaults.GetDefaults().MaximumIfMTU {
|
||||
ifmtu = int(in["mtu"].(float64))
|
||||
}
|
||||
}
|
||||
// Start the TUN adapter
|
||||
if err := a.startTunWithMTU(in["name"].(string), iftapmode, ifmtu); err != nil {
|
||||
return Info{}, errors.New("Failed to configure adapter")
|
||||
} else {
|
||||
return Info{
|
||||
a.core.router.tun.iface.Name(): Info{
|
||||
"tap_mode": a.core.router.tun.iface.IsTAP(),
|
||||
"mtu": ifmtu,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
})
|
||||
*/
|
||||
a.AddHandler("getTunnelRouting", []string{}, func(in admin.Info) (admin.Info, error) {
|
||||
return admin.Info{"enabled": t.ckr.isEnabled()}, nil
|
||||
})
|
||||
a.AddHandler("setTunnelRouting", []string{"enabled"}, func(in admin.Info) (admin.Info, error) {
|
||||
enabled := false
|
||||
if e, ok := in["enabled"].(bool); ok {
|
||||
enabled = e
|
||||
}
|
||||
t.ckr.setEnabled(enabled)
|
||||
return admin.Info{"enabled": enabled}, nil
|
||||
})
|
||||
a.AddHandler("addLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addLocalSubnet(in["subnet"].(string)); err == nil {
|
||||
return admin.Info{"added": []string{in["subnet"].(string)}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_added": []string{in["subnet"].(string)}}, errors.New("Failed to add source subnet")
|
||||
}
|
||||
})
|
||||
a.AddHandler("addRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
return admin.Info{"added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to add route")
|
||||
}
|
||||
})
|
||||
a.AddHandler("getSourceSubnets", []string{}, func(in admin.Info) (admin.Info, error) {
|
||||
var subnets []string
|
||||
getSourceSubnets := func(snets []net.IPNet) {
|
||||
for _, subnet := range snets {
|
||||
subnets = append(subnets, subnet.String())
|
||||
}
|
||||
}
|
||||
getSourceSubnets(t.ckr.ipv4locals)
|
||||
getSourceSubnets(t.ckr.ipv6locals)
|
||||
return admin.Info{"source_subnets": subnets}, nil
|
||||
})
|
||||
a.AddHandler("getRoutes", []string{}, func(in admin.Info) (admin.Info, error) {
|
||||
routes := make(admin.Info)
|
||||
getRoutes := func(ckrs []cryptokey_route) {
|
||||
for _, ckr := range ckrs {
|
||||
routes[ckr.subnet.String()] = hex.EncodeToString(ckr.destination[:])
|
||||
}
|
||||
}
|
||||
getRoutes(t.ckr.ipv4remotes)
|
||||
getRoutes(t.ckr.ipv6remotes)
|
||||
return admin.Info{"routes": routes}, nil
|
||||
})
|
||||
a.AddHandler("removeLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeLocalSubnet(in["subnet"].(string)); err == nil {
|
||||
return admin.Info{"removed": []string{in["subnet"].(string)}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_removed": []string{in["subnet"].(string)}}, errors.New("Failed to remove source subnet")
|
||||
}
|
||||
})
|
||||
a.AddHandler("removeRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
return admin.Info{"removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to remove route")
|
||||
}
|
||||
})
|
||||
type TUNEntry struct {
|
||||
MTU uint64 `json:"mtu"`
|
||||
}
|
||||
|
||||
func (t *TunAdapter) getTUNHandler(req *GetTUNRequest, res *GetTUNResponse) error {
|
||||
*res = GetTUNResponse{
|
||||
t.Name(): TUNEntry{
|
||||
MTU: t.MTU(),
|
||||
},
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
|
||||
_ = a.AddHandler("getTunTap", []string{}, func(in json.RawMessage) (interface{}, error) {
|
||||
req := &GetTUNRequest{}
|
||||
res := &GetTUNResponse{}
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := t.getTUNHandler(req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
_ = a.AddHandler("getNodeInfo", []string{"key"}, t.proto.nodeinfo.nodeInfoAdminHandler)
|
||||
_ = a.AddHandler("debug_remoteGetSelf", []string{"key"}, t.proto.getSelfHandler)
|
||||
_ = a.AddHandler("debug_remoteGetPeers", []string{"key"}, t.proto.getPeersHandler)
|
||||
_ = a.AddHandler("debug_remoteGetDHT", []string{"key"}, t.proto.getDHTHandler)
|
||||
}
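
The rewritten handlers above take a json.RawMessage in and return a typed response for the admin socket to encode. The sketch below reproduces that request/response shape with a standalone handler map using only the standard library; the registry itself is hypothetical and is not the admin package's API.

// Sketch: decode a raw JSON request into a typed struct, return a typed response.
package main

import (
	"encoding/json"
	"fmt"
)

type GetTUNRequest struct{}

type TUNEntry struct {
	MTU uint64 `json:"mtu"`
}

type GetTUNResponse map[string]TUNEntry

type handlerFunc func(in json.RawMessage) (interface{}, error)

func main() {
	handlers := map[string]handlerFunc{}
	handlers["getTunTap"] = func(in json.RawMessage) (interface{}, error) {
		var req GetTUNRequest
		if err := json.Unmarshal(in, &req); err != nil {
			return nil, err
		}
		// A fixed example entry stands in for the live adapter state.
		return GetTUNResponse{"tun0": {MTU: 65535}}, nil
	}

	out, err := handlers["getTunTap"](json.RawMessage(`{}`))
	if err != nil {
		panic(err)
	}
	enc, _ := json.Marshal(out)
	fmt.Println(string(enc)) // {"tun0":{"mtu":65535}}
}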
|
||||
|
|
|
@ -1,430 +0,0 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
)
|
||||
|
||||
// This module implements crypto-key routing, similar to Wireguard, where we
|
||||
// allow traffic for non-Yggdrasil ranges to be routed over Yggdrasil.
|
||||
|
||||
type cryptokey struct {
|
||||
tun *TunAdapter
|
||||
enabled atomic.Value // bool
|
||||
ipv4remotes []cryptokey_route
|
||||
ipv6remotes []cryptokey_route
|
||||
ipv4cache map[address.Address]cryptokey_route
|
||||
ipv6cache map[address.Address]cryptokey_route
|
||||
ipv4locals []net.IPNet
|
||||
ipv6locals []net.IPNet
|
||||
mutexremotes sync.RWMutex
|
||||
mutexcaches sync.RWMutex
|
||||
mutexlocals sync.RWMutex
|
||||
}
|
||||
|
||||
type cryptokey_route struct {
|
||||
subnet net.IPNet
|
||||
destination crypto.BoxPubKey
|
||||
}
|
||||
|
||||
// Initialise crypto-key routing. This must be done before any other CKR calls.
|
||||
func (c *cryptokey) init(tun *TunAdapter) {
|
||||
c.tun = tun
|
||||
c.configure()
|
||||
}
|
||||
|
||||
// Configure the CKR routes. This should only ever be ran by the TUN/TAP actor.
|
||||
func (c *cryptokey) configure() {
|
||||
current := c.tun.config.GetCurrent()
|
||||
|
||||
// Set enabled/disabled state
|
||||
c.setEnabled(current.TunnelRouting.Enable)
|
||||
|
||||
// Clear out existing routes
|
||||
c.mutexremotes.Lock()
|
||||
c.ipv6remotes = make([]cryptokey_route, 0)
|
||||
c.ipv4remotes = make([]cryptokey_route, 0)
|
||||
c.mutexremotes.Unlock()
|
||||
|
||||
// Add IPv6 routes
|
||||
for ipv6, pubkey := range current.TunnelRouting.IPv6RemoteSubnets {
|
||||
if err := c.addRemoteSubnet(ipv6, pubkey); err != nil {
|
||||
c.tun.log.Errorln("Error adding CKR IPv6 remote subnet:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add IPv4 routes
|
||||
for ipv4, pubkey := range current.TunnelRouting.IPv4RemoteSubnets {
|
||||
if err := c.addRemoteSubnet(ipv4, pubkey); err != nil {
|
||||
c.tun.log.Errorln("Error adding CKR IPv4 remote subnet:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Clear out existing sources
|
||||
c.mutexlocals.Lock()
|
||||
c.ipv6locals = make([]net.IPNet, 0)
|
||||
c.ipv4locals = make([]net.IPNet, 0)
|
||||
c.mutexlocals.Unlock()
|
||||
|
||||
// Add IPv6 sources
|
||||
c.ipv6locals = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv6LocalSubnets {
|
||||
if err := c.addLocalSubnet(source); err != nil {
|
||||
c.tun.log.Errorln("Error adding CKR IPv6 local subnet:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add IPv4 sources
|
||||
c.ipv4locals = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv4LocalSubnets {
|
||||
if err := c.addLocalSubnet(source); err != nil {
|
||||
c.tun.log.Errorln("Error adding CKR IPv4 local subnet:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wipe the caches
|
||||
c.mutexcaches.Lock()
|
||||
c.ipv4cache = make(map[address.Address]cryptokey_route, 0)
|
||||
c.ipv6cache = make(map[address.Address]cryptokey_route, 0)
|
||||
c.mutexcaches.Unlock()
|
||||
}
|
||||
|
||||
// Enable or disable crypto-key routing.
|
||||
func (c *cryptokey) setEnabled(enabled bool) {
|
||||
c.enabled.Store(enabled)
|
||||
}
|
||||
|
||||
// Check if crypto-key routing is enabled.
|
||||
func (c *cryptokey) isEnabled() bool {
|
||||
enabled, ok := c.enabled.Load().(bool)
|
||||
return ok && enabled
|
||||
}
|
||||
|
||||
// Check whether the given address (with the address length specified in bytes)
|
||||
// matches either the current node's address, the node's routed subnet or the
|
||||
// list of subnets specified in ipv4locals/ipv6locals.
|
||||
func (c *cryptokey) isValidLocalAddress(addr address.Address, addrlen int) bool {
|
||||
c.mutexlocals.RLock()
|
||||
defer c.mutexlocals.RUnlock()
|
||||
// Does it match a configured CKR source?
|
||||
if c.isEnabled() {
|
||||
ip := net.IP(addr[:addrlen])
|
||||
// Build our references to the routing sources
|
||||
var routingsources *[]net.IPNet
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if addrlen == net.IPv6len {
|
||||
routingsources = &c.ipv6locals
|
||||
} else if addrlen == net.IPv4len {
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, subnet := range *routingsources {
|
||||
if subnet.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Doesn't match any of the above
|
||||
return false
|
||||
}
|
||||
|
||||
// Adds a source subnet, which allows traffic with these source addresses to
|
||||
// be tunnelled using crypto-key routing.
|
||||
func (c *cryptokey) addLocalSubnet(cidr string) error {
|
||||
c.mutexlocals.Lock()
|
||||
defer c.mutexlocals.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
_, ipnet, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the prefix length and size
|
||||
_, prefixsize := ipnet.Mask.Size()
|
||||
|
||||
// Build our references to the routing sources
|
||||
var routingsources *[]net.IPNet
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingsources = &c.ipv6locals
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// Check if we already have this CIDR
|
||||
for _, subnet := range *routingsources {
|
||||
if subnet.String() == ipnet.String() {
|
||||
return errors.New("local subnet already configured")
|
||||
}
|
||||
}
|
||||
|
||||
// Add the source subnet
|
||||
*routingsources = append(*routingsources, *ipnet)
|
||||
c.tun.log.Infoln("Added CKR local subnet", cidr)
|
||||
return nil
|
||||
}
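
addLocalSubnet, like the other CKR functions in this file, picks the IPv4 or IPv6 table by comparing the CIDR's mask size in bits against net.IPv4len*8 and net.IPv6len*8. A compact, runnable sketch of just that check (helper name is illustrative):

// Sketch: classifying a CIDR as IPv4 or IPv6 by its mask size in bits.
package main

import (
	"fmt"
	"net"
)

func tableFor(cidr string) (string, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return "", err
	}
	switch _, bits := ipnet.Mask.Size(); bits {
	case net.IPv6len * 8:
		return "ipv6", nil
	case net.IPv4len * 8:
		return "ipv4", nil
	default:
		return "", fmt.Errorf("unexpected prefix size")
	}
}

func main() {
	for _, cidr := range []string{"192.168.0.0/16", "300::/64"} {
		table, err := tableFor(cidr)
		fmt.Println(cidr, table, err)
	}
}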
|
||||
|
||||
// Adds a destination route for the given CIDR to be tunnelled to the node
|
||||
// with the given BoxPubKey.
|
||||
func (c *cryptokey) addRemoteSubnet(cidr string, dest string) error {
|
||||
c.mutexremotes.Lock()
|
||||
c.mutexcaches.Lock()
|
||||
defer c.mutexremotes.Unlock()
|
||||
defer c.mutexcaches.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
ipaddr, ipnet, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the prefix length and size
|
||||
_, prefixsize := ipnet.Mask.Size()
|
||||
|
||||
// Build our references to the routing table and cache
|
||||
var routingtable *[]cryptokey_route
|
||||
var routingcache *map[address.Address]cryptokey_route
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingtable = &c.ipv6remotes
|
||||
routingcache = &c.ipv6cache
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingtable = &c.ipv4remotes
|
||||
routingcache = &c.ipv4cache
|
||||
} else {
|
||||
return errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// Is the route an Yggdrasil destination?
|
||||
var addr address.Address
|
||||
var snet address.Subnet
|
||||
copy(addr[:], ipaddr)
|
||||
copy(snet[:], ipnet.IP)
|
||||
if addr.IsValid() || snet.IsValid() {
|
||||
return errors.New("can't specify Yggdrasil destination as crypto-key route")
|
||||
}
|
||||
// Do we already have a route for this subnet?
|
||||
for _, route := range *routingtable {
|
||||
if route.subnet.String() == ipnet.String() {
|
||||
return fmt.Errorf("remote subnet already exists for %s", cidr)
|
||||
}
|
||||
}
|
||||
// Decode the public key
|
||||
if bpk, err := hex.DecodeString(dest); err != nil {
|
||||
return err
|
||||
} else if len(bpk) != crypto.BoxPubKeyLen {
|
||||
return fmt.Errorf("incorrect key length for %s", dest)
|
||||
} else {
|
||||
// Add the new crypto-key route
|
||||
var key crypto.BoxPubKey
|
||||
copy(key[:], bpk)
|
||||
*routingtable = append(*routingtable, cryptokey_route{
|
||||
subnet: *ipnet,
|
||||
destination: key,
|
||||
})
|
||||
|
||||
// Sort so most specific routes are first
|
||||
sort.Slice(*routingtable, func(i, j int) bool {
|
||||
im, _ := (*routingtable)[i].subnet.Mask.Size()
|
||||
jm, _ := (*routingtable)[j].subnet.Mask.Size()
|
||||
return im > jm
|
||||
})
|
||||
|
||||
// Clear the cache as this route might change future routing
|
||||
// Setting an empty slice keeps the memory whereas nil invokes GC
|
||||
for k := range *routingcache {
|
||||
delete(*routingcache, k)
|
||||
}
|
||||
|
||||
c.tun.log.Infoln("Added CKR remote subnet", cidr)
|
||||
return nil
|
||||
}
|
||||
}
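
addRemoteSubnet keeps the routing table sorted so the most specific (longest) prefixes come first, which lets a lookup take the first containing subnet as the best match. The following sketch demonstrates that sort-then-scan lookup with illustrative route values standing in for box public keys.

// Sketch: longest-prefix-first routing lookup over a sorted slice of subnets.
package main

import (
	"fmt"
	"net"
	"sort"
)

type route struct {
	subnet net.IPNet
	dest   string // stands in for the box public key
}

func mustCIDR(s string) net.IPNet {
	_, n, err := net.ParseCIDR(s)
	if err != nil {
		panic(err)
	}
	return *n
}

func main() {
	routes := []route{
		{mustCIDR("10.0.0.0/8"), "key-coarse"},
		{mustCIDR("10.1.2.0/24"), "key-specific"},
	}
	// Most specific (longest mask) first, as in addRemoteSubnet.
	sort.Slice(routes, func(i, j int) bool {
		im, _ := routes[i].subnet.Mask.Size()
		jm, _ := routes[j].subnet.Mask.Size()
		return im > jm
	})
	ip := net.ParseIP("10.1.2.3")
	for _, r := range routes {
		if r.subnet.Contains(ip) {
			fmt.Println("route via", r.dest) // route via key-specific
			break
		}
	}
}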
|
||||
|
||||
// Looks up the most specific route for the given address (with the address
|
||||
// length specified in bytes) from the crypto-key routing table. An error is
|
||||
// returned if the address is not suitable or no route was found.
|
||||
func (c *cryptokey) getPublicKeyForAddress(addr address.Address, addrlen int) (crypto.BoxPubKey, error) {
|
||||
|
||||
// Check if the address is a valid Yggdrasil address - if so it
|
||||
// is exempt from all CKR checking
|
||||
if addr.IsValid() {
|
||||
return crypto.BoxPubKey{}, errors.New("cannot look up CKR for Yggdrasil addresses")
|
||||
}
|
||||
|
||||
// Build our references to the routing table and cache
|
||||
var routingtable *[]cryptokey_route
|
||||
var routingcache *map[address.Address]cryptokey_route
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if addrlen == net.IPv6len {
|
||||
routingcache = &c.ipv6cache
|
||||
} else if addrlen == net.IPv4len {
|
||||
routingcache = &c.ipv4cache
|
||||
} else {
|
||||
return crypto.BoxPubKey{}, errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// Check if there's a cache entry for this addr
|
||||
c.mutexcaches.RLock()
|
||||
if route, ok := (*routingcache)[addr]; ok {
|
||||
c.mutexcaches.RUnlock()
|
||||
return route.destination, nil
|
||||
}
|
||||
c.mutexcaches.RUnlock()
|
||||
|
||||
c.mutexremotes.RLock()
|
||||
defer c.mutexremotes.RUnlock()
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if addrlen == net.IPv6len {
|
||||
routingtable = &c.ipv6remotes
|
||||
} else if addrlen == net.IPv4len {
|
||||
routingtable = &c.ipv4remotes
|
||||
} else {
|
||||
return crypto.BoxPubKey{}, errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// No cache was found - start by converting the address into a net.IP
|
||||
ip := make(net.IP, addrlen)
|
||||
copy(ip[:addrlen], addr[:])
|
||||
|
||||
// Check if we have a route. At this point c.ipv6remotes should be
|
||||
// pre-sorted so that the most specific routes are first
|
||||
for _, route := range *routingtable {
|
||||
// Does this subnet match the given IP?
|
||||
if route.subnet.Contains(ip) {
|
||||
c.mutexcaches.Lock()
|
||||
defer c.mutexcaches.Unlock()
|
||||
|
||||
// Check if the routing cache is above a certain size, if it is evict
|
||||
// a random entry so we can make room for this one. We take advantage
|
||||
// of the fact that the iteration order is random here
|
||||
for k := range *routingcache {
|
||||
if len(*routingcache) < 1024 {
|
||||
break
|
||||
}
|
||||
delete(*routingcache, k)
|
||||
}
|
||||
|
||||
// Cache the entry for future packets to get a faster lookup
|
||||
(*routingcache)[addr] = route
|
||||
|
||||
// Return the boxPubKey
|
||||
return route.destination, nil
|
||||
}
|
||||
}
|
||||
|
||||
// No route was found if we got to this point
|
||||
return crypto.BoxPubKey{}, fmt.Errorf("no route to %s", ip.String())
|
||||
}
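
getPublicKeyForAddress bounds its cache by deleting entries while ranging over the map, relying on Go's unspecified map iteration order to evict effectively random victims without any extra bookkeeping. A tiny sketch of that eviction trick:

// Sketch: bounded map cache with random eviction via map iteration order.
package main

import "fmt"

func insertWithEviction(cache map[string]string, key, value string, limit int) {
	for k := range cache {
		if len(cache) < limit {
			break
		}
		delete(cache, k) // evicts an arbitrary entry
	}
	cache[key] = value
}

func main() {
	cache := make(map[string]string)
	for i := 0; i < 2000; i++ {
		insertWithEviction(cache, fmt.Sprintf("addr-%d", i), "route", 1024)
	}
	fmt.Println(len(cache) <= 1024) // true
}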
|
||||
|
||||
// Removes a source subnet, which allows traffic with these source addresses to
|
||||
// be tunnelled using crypto-key routing.
|
||||
func (c *cryptokey) removeLocalSubnet(cidr string) error {
|
||||
c.mutexlocals.Lock()
|
||||
defer c.mutexlocals.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
_, ipnet, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the prefix length and size
|
||||
_, prefixsize := ipnet.Mask.Size()
|
||||
|
||||
// Build our references to the routing sources
|
||||
var routingsources *[]net.IPNet
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingsources = &c.ipv6locals
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// Check if we already have this CIDR
|
||||
for idx, subnet := range *routingsources {
|
||||
if subnet.String() == ipnet.String() {
|
||||
*routingsources = append((*routingsources)[:idx], (*routingsources)[idx+1:]...)
|
||||
c.tun.log.Infoln("Removed CKR local subnet", cidr)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("local subnet not found")
|
||||
}
|
||||
|
||||
// Removes a destination route for the given CIDR to be tunnelled to the node
|
||||
// with the given BoxPubKey.
|
||||
func (c *cryptokey) removeRemoteSubnet(cidr string, dest string) error {
|
||||
c.mutexremotes.Lock()
|
||||
c.mutexcaches.Lock()
|
||||
defer c.mutexremotes.Unlock()
|
||||
defer c.mutexcaches.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
_, ipnet, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the prefix length and size
|
||||
_, prefixsize := ipnet.Mask.Size()
|
||||
|
||||
// Build our references to the routing table and cache
|
||||
var routingtable *[]cryptokey_route
|
||||
var routingcache *map[address.Address]cryptokey_route
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingtable = &c.ipv6remotes
|
||||
routingcache = &c.ipv6cache
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingtable = &c.ipv4remotes
|
||||
routingcache = &c.ipv4cache
|
||||
} else {
|
||||
return errors.New("unexpected prefix size")
|
||||
}
|
||||
|
||||
// Decode the public key
|
||||
bpk, err := hex.DecodeString(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if len(bpk) != crypto.BoxPubKeyLen {
|
||||
return fmt.Errorf("incorrect key length for %s", dest)
|
||||
}
|
||||
netStr := ipnet.String()
|
||||
|
||||
for idx, route := range *routingtable {
|
||||
if bytes.Equal(route.destination[:], bpk) && route.subnet.String() == netStr {
|
||||
*routingtable = append((*routingtable)[:idx], (*routingtable)[idx+1:]...)
|
||||
for k := range *routingcache {
|
||||
delete(*routingcache, k)
|
||||
}
|
||||
c.tun.log.Infof("Removed CKR remote subnet %s via %s\n", cidr, dest)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("route does not exists for %s", cidr)
|
||||
}
|
|
@ -1,232 +0,0 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
"golang.org/x/net/icmp"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
const tunConnTimeout = 2 * time.Minute
|
||||
|
||||
type tunConn struct {
|
||||
phony.Inbox
|
||||
tun *TunAdapter
|
||||
conn *yggdrasil.Conn
|
||||
addr address.Address
|
||||
snet address.Subnet
|
||||
stop chan struct{}
|
||||
alive *time.Timer // From calling time.AfterFunc
|
||||
}
|
||||
|
||||
func (s *tunConn) close() {
|
||||
s.tun.Act(s, s._close_from_tun)
|
||||
}
|
||||
|
||||
func (s *tunConn) _close_from_tun() {
|
||||
go s.conn.Close() // Just in case it blocks on actor operations
|
||||
delete(s.tun.addrToConn, s.addr)
|
||||
delete(s.tun.subnetToConn, s.snet)
|
||||
func() {
|
||||
defer func() { recover() }()
|
||||
close(s.stop) // Closes reader/writer goroutines
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *tunConn) _read(bs []byte) (err error) {
|
||||
select {
|
||||
case <-s.stop:
|
||||
err = errors.New("session was already closed")
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
default:
|
||||
}
|
||||
if len(bs) == 0 {
|
||||
err = errors.New("read packet with 0 size")
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
}
|
||||
ipv4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
|
||||
ipv6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
|
||||
isCGA := true
|
||||
// Check source addresses
|
||||
switch {
|
||||
case ipv6 && bs[8] == 0x02 && bytes.Equal(s.addr[:16], bs[8:24]): // source
|
||||
case ipv6 && bs[8] == 0x03 && bytes.Equal(s.snet[:8], bs[8:16]): // source
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Check destination addresses
|
||||
switch {
|
||||
case ipv6 && bs[24] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[24:40]): // destination
|
||||
case ipv6 && bs[24] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[24:32]): // destination
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Decide how to handle the packet
|
||||
var skip bool
|
||||
switch {
|
||||
case isCGA: // Allowed
|
||||
case s.tun.ckr.isEnabled() && (ipv4 || ipv6):
|
||||
var srcAddr address.Address
|
||||
var dstAddr address.Address
|
||||
var addrlen int
|
||||
if ipv4 {
|
||||
copy(srcAddr[:], bs[12:16])
|
||||
copy(dstAddr[:], bs[16:20])
|
||||
addrlen = 4
|
||||
}
|
||||
if ipv6 {
|
||||
copy(srcAddr[:], bs[8:24])
|
||||
copy(dstAddr[:], bs[24:40])
|
||||
addrlen = 16
|
||||
}
|
||||
if !s.tun.ckr.isValidLocalAddress(dstAddr, addrlen) {
|
||||
// The destination address isn't in our CKR allowed range
|
||||
skip = true
|
||||
} else if key, err := s.tun.ckr.getPublicKeyForAddress(srcAddr, addrlen); err == nil {
|
||||
if *s.conn.RemoteAddr().(*crypto.BoxPubKey) == key {
|
||||
// This is the one allowed CKR case, where source and destination addresses are both good
|
||||
} else {
|
||||
// The CKR key associated with this address doesn't match the sender's NodeID
|
||||
skip = true
|
||||
}
|
||||
} else {
|
||||
// We have no CKR route for this source address
|
||||
skip = true
|
||||
}
|
||||
default:
|
||||
skip = true
|
||||
}
|
||||
if skip {
|
||||
err = errors.New("address not allowed")
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
}
|
||||
s.tun.writer.writeFrom(s, bs)
|
||||
s.stillAlive()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *tunConn) writeFrom(from phony.Actor, bs []byte) {
|
||||
s.Act(from, func() {
|
||||
s._write(bs)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *tunConn) _write(bs []byte) (err error) {
|
||||
select {
|
||||
case <-s.stop:
|
||||
err = errors.New("session was already closed")
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
default:
|
||||
}
|
||||
v4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
|
||||
v6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
|
||||
isCGA := true
|
||||
// Check source addresses
|
||||
switch {
|
||||
case v6 && bs[8] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[8:24]): // source
|
||||
case v6 && bs[8] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[8:16]): // source
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Check destination addresses
|
||||
switch {
|
||||
case v6 && bs[24] == 0x02 && bytes.Equal(s.addr[:16], bs[24:40]): // destination
|
||||
case v6 && bs[24] == 0x03 && bytes.Equal(s.snet[:8], bs[24:32]): // destination
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Decide how to handle the packet
|
||||
var skip bool
|
||||
switch {
|
||||
case isCGA: // Allowed
|
||||
case s.tun.ckr.isEnabled() && (v4 || v6):
|
||||
var srcAddr address.Address
|
||||
var dstAddr address.Address
|
||||
var addrlen int
|
||||
if v4 {
|
||||
copy(srcAddr[:], bs[12:16])
|
||||
copy(dstAddr[:], bs[16:20])
|
||||
addrlen = 4
|
||||
}
|
||||
if v6 {
|
||||
copy(srcAddr[:], bs[8:24])
|
||||
copy(dstAddr[:], bs[24:40])
|
||||
addrlen = 16
|
||||
}
|
||||
if !s.tun.ckr.isValidLocalAddress(srcAddr, addrlen) {
|
||||
// The source address isn't in our CKR allowed range
|
||||
skip = true
|
||||
} else if key, err := s.tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
|
||||
if *s.conn.RemoteAddr().(*crypto.BoxPubKey) == key {
|
||||
// This is the one allowed CKR case, where source and destination addresses are both good
|
||||
} else {
|
||||
// The CKR key associated with this address doesn't match the sender's NodeID
|
||||
skip = true
|
||||
}
|
||||
} else {
|
||||
// We have no CKR route for this destination address... why do we have the packet in the first place?
|
||||
skip = true
|
||||
}
|
||||
default:
|
||||
skip = true
|
||||
}
|
||||
if skip {
|
||||
err = errors.New("address not allowed")
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
}
|
||||
msg := yggdrasil.FlowKeyMessage{
|
||||
FlowKey: util.GetFlowKey(bs),
|
||||
Message: bs,
|
||||
}
|
||||
s.conn.WriteFrom(s, msg, func(err error) {
|
||||
if err == nil {
|
||||
// No point in wasting resources to send back an error if there was none
|
||||
return
|
||||
}
|
||||
s.Act(s.conn, func() {
|
||||
if e, eok := err.(yggdrasil.ConnError); !eok {
|
||||
if e.Closed() {
|
||||
s.tun.log.Debugln(s.conn.String(), "TUN/TAP generic write debug:", err)
|
||||
} else {
|
||||
s.tun.log.Errorln(s.conn.String(), "TUN/TAP generic write error:", err)
|
||||
}
|
||||
} else if e.PacketTooBig() {
|
||||
// TODO: This currently isn't aware of IPv4 for CKR
|
||||
ptb := &icmp.PacketTooBig{
|
||||
MTU: int(e.PacketMaximumSize()),
|
||||
Data: bs[:900],
|
||||
}
|
||||
if packet, err := CreateICMPv6(bs[8:24], bs[24:40], ipv6.ICMPTypePacketTooBig, 0, ptb); err == nil {
|
||||
s.tun.writer.writeFrom(s, packet)
|
||||
}
|
||||
} else {
|
||||
if e.Closed() {
|
||||
s.tun.log.Debugln(s.conn.String(), "TUN/TAP conn write debug:", err)
|
||||
} else {
|
||||
s.tun.log.Errorln(s.conn.String(), "TUN/TAP conn write error:", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
s.stillAlive()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *tunConn) stillAlive() {
|
||||
if s.alive != nil {
|
||||
s.alive.Stop()
|
||||
}
|
||||
s.alive = time.AfterFunc(tunConnTimeout, s.close)
|
||||
}
|
|
@ -17,11 +17,7 @@ import (
|
|||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
const len_ETHER = 14
|
||||
|
||||
type ICMPv6 struct {
|
||||
tun *TunAdapter
|
||||
}
|
||||
type ICMPv6 struct{}
|
||||
|
||||
// Marshal returns the binary encoding of h.
|
||||
func ipv6Header_Marshal(h *ipv6.Header) ([]byte, error) {
|
||||
|
@ -40,13 +36,6 @@ func ipv6Header_Marshal(h *ipv6.Header) ([]byte, error) {
|
|||
return b, nil
|
||||
}
|
||||
|
||||
// Initialises the ICMPv6 module by assigning our link-local IPv6 address and
|
||||
// our MAC address. ICMPv6 messages will always appear to originate from these
|
||||
// addresses.
|
||||
func (i *ICMPv6) Init(t *TunAdapter) {
|
||||
i.tun = t
|
||||
}
|
||||
|
||||
// Creates an ICMPv6 packet based on the given icmp.MessageBody and other
|
||||
// parameters, complete with IP headers only, which can be written directly to
|
||||
// a TUN adapter, or called directly by the CreateICMPv6L2 function when
|
||||
|
|
|
@ -1,210 +1,138 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
"crypto/ed25519"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"golang.org/x/net/icmp"
|
||||
"golang.org/x/net/ipv6"
|
||||
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
|
||||
//"golang.org/x/net/icmp"
|
||||
//"golang.org/x/net/ipv6"
|
||||
|
||||
iwt "github.com/Arceliar/ironwood/types"
|
||||
//"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
const TUN_OFFSET_BYTES = 4
|
||||
|
||||
type tunWriter struct {
|
||||
phony.Inbox
|
||||
tun *TunAdapter
|
||||
}
|
||||
|
||||
func (w *tunWriter) writeFrom(from phony.Actor, b []byte) {
|
||||
w.Act(from, func() {
|
||||
w._write(b)
|
||||
})
|
||||
}
|
||||
|
||||
// write is pretty loose with the memory safety rules, e.g. it assumes it can
|
||||
// read w.tun.iface.IsTap() safely
|
||||
func (w *tunWriter) _write(b []byte) {
|
||||
defer util.PutBytes(b)
|
||||
var written int
|
||||
var err error
|
||||
n := len(b)
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
temp := append(util.ResizeBytes(util.GetBytes(), TUN_OFFSET_BYTES), b...)
|
||||
defer util.PutBytes(temp)
|
||||
written, err = w.tun.iface.Write(temp, TUN_OFFSET_BYTES)
|
||||
if err != nil {
|
||||
w.tun.Act(w, func() {
|
||||
if !w.tun.isOpen {
|
||||
w.tun.log.Errorln("TUN iface write error:", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
if written != n+TUN_OFFSET_BYTES {
|
||||
// FIXME some platforms return the wrong number of bytes written, causing error spam
|
||||
//w.tun.log.Errorln("TUN iface write mismatch:", written, "bytes written vs", n+TUN_OFFSET_BYTES, "bytes given")
|
||||
}
|
||||
}
|
||||
|
||||
type tunReader struct {
|
||||
phony.Inbox
|
||||
tun *TunAdapter
|
||||
}
|
||||
|
||||
func (r *tunReader) _read() {
|
||||
// Get a slice to store the packet in
|
||||
recvd := util.ResizeBytes(util.GetBytes(), int(r.tun.mtu)+TUN_OFFSET_BYTES)
|
||||
// Wait for a packet to be delivered to us through the TUN adapter
|
||||
n, err := r.tun.iface.Read(recvd, TUN_OFFSET_BYTES)
|
||||
func (tun *TunAdapter) read() {
|
||||
var buf [TUN_OFFSET_BYTES + 65535]byte
|
||||
for {
|
||||
n, err := tun.iface.Read(buf[:], TUN_OFFSET_BYTES)
|
||||
if n <= TUN_OFFSET_BYTES || err != nil {
|
||||
r.tun.log.Errorln("Error reading TUN:", err)
|
||||
ferr := r.tun.iface.Flush()
|
||||
tun.log.Errorln("Error reading TUN:", err)
|
||||
ferr := tun.iface.Flush()
|
||||
if ferr != nil {
|
||||
r.tun.log.Errorln("Unable to flush packets:", ferr)
|
||||
tun.log.Errorln("Unable to flush packets:", ferr)
|
||||
}
|
||||
util.PutBytes(recvd)
|
||||
} else {
|
||||
r.tun.handlePacketFrom(r, recvd[TUN_OFFSET_BYTES:n+TUN_OFFSET_BYTES], err)
|
||||
}
|
||||
if err == nil {
|
||||
// Now read again
|
||||
r.Act(nil, r._read)
|
||||
}
|
||||
}
|
||||
|
||||
func (tun *TunAdapter) handlePacketFrom(from phony.Actor, packet []byte, err error) {
|
||||
tun.Act(from, func() {
|
||||
tun._handlePacket(packet, err)
|
||||
})
|
||||
}
|
||||
|
||||
// does the work of reading a packet and sending it to the correct tunConn
|
||||
func (tun *TunAdapter) _handlePacket(recvd []byte, err error) {
|
||||
if err != nil {
|
||||
tun.log.Errorln("TUN iface read error:", err)
|
||||
return
|
||||
}
|
||||
// Offset the buffer from now on so that we can ignore ethernet frames if
|
||||
// they are present
|
||||
bs := recvd[:]
|
||||
// Check if the packet is long enough to detect if it's an ICMP packet or not
|
||||
if len(bs) < 7 {
|
||||
tun.log.Traceln("TUN iface read undersized unknown packet, length:", len(bs))
|
||||
return
|
||||
begin := TUN_OFFSET_BYTES
|
||||
end := begin + n
|
||||
bs := buf[begin:end]
|
||||
if bs[0]&0xf0 != 0x60 {
|
||||
continue // not IPv6
|
||||
}
|
||||
// From the IP header, work out what our source and destination addresses
|
||||
// and node IDs are. We will need these in order to work out where to send
|
||||
// the packet
|
||||
var dstAddr address.Address
|
||||
var dstSnet address.Subnet
|
||||
var addrlen int
|
||||
n := len(bs)
|
||||
// Check the IP protocol - if it doesn't match then we drop the packet and
|
||||
// do nothing with it
|
||||
if bs[0]&0xf0 == 0x60 {
|
||||
// Check if we have a fully-sized IPv6 header
|
||||
if len(bs) < 40 {
|
||||
tun.log.Traceln("TUN iface read undersized ipv6 packet, length:", len(bs))
|
||||
return
|
||||
continue
|
||||
}
|
||||
// Check the packet size
|
||||
if n-tun_IPv6_HEADER_LENGTH != 256*int(bs[4])+int(bs[5]) {
|
||||
return
|
||||
var srcAddr, dstAddr address.Address
|
||||
var srcSubnet, dstSubnet address.Subnet
|
||||
copy(srcAddr[:], bs[8:])
|
||||
copy(dstAddr[:], bs[24:])
|
||||
copy(srcSubnet[:], bs[8:])
|
||||
copy(dstSubnet[:], bs[24:])
|
||||
if srcAddr != tun.addr && srcSubnet != tun.subnet {
|
||||
continue // Wrong source address
|
||||
}
|
||||
// IPv6 address
|
||||
addrlen = 16
|
||||
copy(dstAddr[:addrlen], bs[24:])
|
||||
copy(dstSnet[:addrlen/2], bs[24:])
|
||||
} else if bs[0]&0xf0 == 0x40 {
|
||||
// Check if we have a fully-sized IPv4 header
|
||||
if len(bs) < 20 {
|
||||
tun.log.Traceln("TUN iface read undersized ipv4 packet, length:", len(bs))
|
||||
return
|
||||
}
|
||||
// Check the packet size
|
||||
if n != 256*int(bs[2])+int(bs[3]) {
|
||||
return
|
||||
}
|
||||
// IPv4 address
|
||||
addrlen = 4
|
||||
copy(dstAddr[:addrlen], bs[16:])
|
||||
} else {
|
||||
// Unknown address length or protocol, so drop the packet and ignore it
|
||||
tun.log.Traceln("Unknown packet type, dropping")
|
||||
return
|
||||
}
|
||||
if tun.ckr.isEnabled() {
|
||||
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
|
||||
if key, err := tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
|
||||
// A public key was found, get the node ID for the search
|
||||
dstNodeID := crypto.GetNodeID(&key)
|
||||
dstAddr = *address.AddrForNodeID(dstNodeID)
|
||||
dstSnet = *address.SubnetForNodeID(dstNodeID)
|
||||
addrlen = 16
|
||||
}
|
||||
}
|
||||
}
|
||||
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
|
||||
// Couldn't find this node's ygg IP
|
||||
return
|
||||
}
|
||||
// Do we have an active connection for this node address?
|
||||
var dstString string
|
||||
session, isIn := tun.addrToConn[dstAddr]
|
||||
if !isIn || session == nil {
|
||||
session, isIn = tun.subnetToConn[dstSnet]
|
||||
if !isIn || session == nil {
|
||||
// Neither an address nor a subnet mapping matched, therefore populate
|
||||
// the node ID and mask to commence a search
|
||||
bs = buf[begin-1 : end]
|
||||
bs[0] = typeSessionTraffic
|
||||
if dstAddr.IsValid() {
|
||||
dstString = dstAddr.GetNodeIDLengthString()
|
||||
} else {
|
||||
dstString = dstSnet.GetNodeIDLengthString()
|
||||
tun.store.sendToAddress(dstAddr, bs)
|
||||
} else if dstSubnet.IsValid() {
|
||||
tun.store.sendToSubnet(dstSubnet, bs)
|
||||
}
|
||||
}
|
||||
}
|
||||
// If we don't have a connection then we should open one
|
||||
if !isIn || session == nil {
|
||||
// Check we haven't been given an empty node ID; really this shouldn't ever
|
||||
// happen but just to be sure...
|
||||
if dstString == "" {
|
||||
panic("Given empty dstString - this shouldn't happen")
|
||||
}
|
||||
_, known := tun.dials[dstString]
|
||||
tun.dials[dstString] = append(tun.dials[dstString], bs)
|
||||
for len(tun.dials[dstString]) > 32 {
|
||||
util.PutBytes(tun.dials[dstString][0])
|
||||
tun.dials[dstString] = tun.dials[dstString][1:]
|
||||
}
|
||||
if !known {
|
||||
go func() {
|
||||
conn, err := tun.dialer.Dial("nodeid", dstString)
|
||||
tun.Act(nil, func() {
|
||||
packets := tun.dials[dstString]
|
||||
delete(tun.dials, dstString)
|
||||
}
|
||||
|
||||
func (tun *TunAdapter) write() {
|
||||
var buf [TUN_OFFSET_BYTES + 65535]byte
|
||||
for {
|
||||
bs := buf[TUN_OFFSET_BYTES-1:]
|
||||
n, from, err := tun.core.ReadFrom(bs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// We've been given a connection so prepare the session wrapper
|
||||
var tc *tunConn
|
||||
if tc, err = tun._wrap(conn.(*yggdrasil.Conn)); err != nil {
|
||||
// Something went wrong when storing the connection, typically that
|
||||
// something already exists for this address or subnet
|
||||
tun.log.Debugln("TUN iface wrap:", err)
|
||||
return
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
for _, packet := range packets {
|
||||
tc.writeFrom(nil, packet)
|
||||
switch bs[0] {
|
||||
case typeSessionTraffic:
|
||||
// This is what we want to handle here
|
||||
if !tun.isEnabled {
|
||||
continue // Drop traffic if the tun is disabled
|
||||
}
|
||||
case typeSessionProto:
|
||||
var key keyArray
|
||||
copy(key[:], from.(iwt.Addr))
|
||||
data := append([]byte(nil), bs[1:n]...)
|
||||
tun.proto.handleProto(nil, key, data)
|
||||
continue
|
||||
default:
|
||||
continue
|
||||
}
|
||||
bs = bs[1:n]
|
||||
if len(bs) == 0 {
|
||||
continue
|
||||
}
|
||||
if bs[0]&0xf0 != 0x60 {
|
||||
continue // not IPv6
|
||||
}
|
||||
if len(bs) < 40 {
|
||||
continue
|
||||
}
|
||||
if len(bs) > int(tun.MTU()) {
|
||||
ptb := &icmp.PacketTooBig{
|
||||
MTU: int(tun.mtu),
|
||||
Data: bs[:40],
|
||||
}
|
||||
if packet, err := CreateICMPv6(bs[8:24], bs[24:40], ipv6.ICMPTypePacketTooBig, 0, ptb); err == nil {
|
||||
_, _ = tun.core.WriteTo(packet, from)
|
||||
}
|
||||
continue
|
||||
}
|
||||
var srcAddr, dstAddr address.Address
|
||||
var srcSubnet, dstSubnet address.Subnet
|
||||
copy(srcAddr[:], bs[8:])
|
||||
copy(dstAddr[:], bs[24:])
|
||||
copy(srcSubnet[:], bs[8:])
|
||||
copy(dstSubnet[:], bs[24:])
|
||||
if dstAddr != tun.addr && dstSubnet != tun.subnet {
|
||||
continue // bad local address/subnet
|
||||
}
|
||||
info := tun.store.update(ed25519.PublicKey(from.(iwt.Addr)))
|
||||
if info == nil {
|
||||
continue // Blocked by the gatekeeper
|
||||
}
|
||||
if srcAddr != info.address && srcSubnet != info.subnet {
|
||||
continue // bad remote address/subnet
|
||||
}
|
||||
bs = buf[:TUN_OFFSET_BYTES+len(bs)]
|
||||
n, err = tun.iface.Write(bs, TUN_OFFSET_BYTES)
|
||||
if err != nil {
|
||||
tun.Act(nil, func() {
|
||||
if !tun.isOpen {
|
||||
tun.log.Errorln("TUN iface write error:", err)
|
||||
}
|
||||
})
|
||||
return
|
||||
}()
|
||||
}
|
||||
if n != len(bs) {
|
||||
// TODO some kind of error reporting for a partial write
|
||||
}
|
||||
// If we have a connection now, try writing to it
|
||||
if isIn && session != nil {
|
||||
session.writeFrom(tun, bs)
|
||||
}
|
||||
}
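
The new read() path above reads each packet at TUN_OFFSET_BYTES into a larger buffer, so a one-byte session type can be prepended by re-slicing one byte earlier instead of copying the packet. A self-contained sketch of that in-place framing follows; the constant values are illustrative and do not necessarily match the real type codes.

// Sketch: prepending a type byte by borrowing the byte before the packet.
package main

import "fmt"

const offsetBytes = 4
const typeSessionTraffic = 1 // illustrative value, not necessarily the real constant

func main() {
	var buf [offsetBytes + 16]byte
	// Pretend a 5-byte packet was read into buf[offsetBytes:].
	n := copy(buf[offsetBytes:], []byte{0x60, 1, 2, 3, 4})

	begin, end := offsetBytes, offsetBytes+n
	bs := buf[begin-1 : end] // borrow the byte immediately before the packet
	bs[0] = typeSessionTraffic
	fmt.Println(bs) // [1 96 1 2 3 4] -- type byte followed by the packet
}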
|
||||
|
|
169
src/tuntap/keystore.go
Normal file
|
@ -0,0 +1,169 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
iwt "github.com/Arceliar/ironwood/types"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
const keyStoreTimeout = 2 * time.Minute
|
||||
|
||||
type keyStore struct {
|
||||
tun *TunAdapter
|
||||
mutex sync.Mutex
|
||||
keyToInfo map[keyArray]*keyInfo
|
||||
addrToInfo map[address.Address]*keyInfo
|
||||
addrBuffer map[address.Address]*buffer
|
||||
subnetToInfo map[address.Subnet]*keyInfo
|
||||
subnetBuffer map[address.Subnet]*buffer
|
||||
}
|
||||
|
||||
type keyArray [ed25519.PublicKeySize]byte
|
||||
|
||||
type keyInfo struct {
|
||||
key keyArray
|
||||
address address.Address
|
||||
subnet address.Subnet
|
||||
timeout *time.Timer // From calling a time.AfterFunc to do cleanup
|
||||
}
|
||||
|
||||
type buffer struct {
|
||||
packets [][]byte
|
||||
timeout *time.Timer
|
||||
}
|
||||
|
||||
func (k *keyStore) init(tun *TunAdapter) {
|
||||
k.tun = tun
|
||||
k.keyToInfo = make(map[keyArray]*keyInfo)
|
||||
k.addrToInfo = make(map[address.Address]*keyInfo)
|
||||
k.addrBuffer = make(map[address.Address]*buffer)
|
||||
k.subnetToInfo = make(map[address.Subnet]*keyInfo)
|
||||
k.subnetBuffer = make(map[address.Subnet]*buffer)
|
||||
}
|
||||
|
||||
func (k *keyStore) sendToAddress(addr address.Address, bs []byte) {
|
||||
k.mutex.Lock()
|
||||
if info := k.addrToInfo[addr]; info != nil {
|
||||
k.resetTimeout(info)
|
||||
k.mutex.Unlock()
|
||||
k.tun.core.WriteTo(bs, iwt.Addr(info.key[:]))
|
||||
} else {
|
||||
var buf *buffer
|
||||
if buf = k.addrBuffer[addr]; buf == nil {
|
||||
buf = new(buffer)
|
||||
k.addrBuffer[addr] = buf
|
||||
}
|
||||
msg := append([]byte(nil), bs...)
|
||||
buf.packets = append(buf.packets, msg)
|
||||
if buf.timeout != nil {
|
||||
buf.timeout.Stop()
|
||||
}
|
||||
buf.timeout = time.AfterFunc(keyStoreTimeout, func() {
|
||||
k.mutex.Lock()
|
||||
defer k.mutex.Unlock()
|
||||
if nbuf := k.addrBuffer[addr]; nbuf == buf {
|
||||
delete(k.addrBuffer, addr)
|
||||
}
|
||||
})
|
||||
k.mutex.Unlock()
|
||||
k.tun.sendKeyLookup(addr.GetKey())
|
||||
}
|
||||
}
|
||||
|
||||
func (k *keyStore) sendToSubnet(subnet address.Subnet, bs []byte) {
|
||||
k.mutex.Lock()
|
||||
if info := k.subnetToInfo[subnet]; info != nil {
|
||||
k.resetTimeout(info)
|
||||
k.mutex.Unlock()
|
||||
k.tun.core.WriteTo(bs, iwt.Addr(info.key[:]))
|
||||
} else {
|
||||
var buf *buffer
|
||||
if buf = k.subnetBuffer[subnet]; buf == nil {
|
||||
buf = new(buffer)
|
||||
k.subnetBuffer[subnet] = buf
|
||||
}
|
||||
msg := append([]byte(nil), bs...)
|
||||
buf.packets = append(buf.packets, msg)
|
||||
if buf.timeout != nil {
|
||||
buf.timeout.Stop()
|
||||
}
|
||||
buf.timeout = time.AfterFunc(keyStoreTimeout, func() {
|
||||
k.mutex.Lock()
|
||||
defer k.mutex.Unlock()
|
||||
if nbuf := k.subnetBuffer[subnet]; nbuf == buf {
|
||||
delete(k.subnetBuffer, subnet)
|
||||
}
|
||||
})
|
||||
k.mutex.Unlock()
|
||||
k.tun.sendKeyLookup(subnet.GetKey())
|
||||
}
|
||||
}
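
sendToAddress and sendToSubnet queue packets for destinations whose key is not yet known, trigger a lookup, and flush the queue once update() learns the key. The sketch below condenses that buffer-then-flush flow; the names are illustrative and a plain callback stands in for the real key lookup and write path.

// Sketch: buffer packets per destination until its key is resolved, then flush.
package main

import (
	"fmt"
	"sync"
)

type store struct {
	mutex   sync.Mutex
	known   map[string]bool     // address -> key already resolved?
	pending map[string][][]byte // address -> buffered packets
}

func (s *store) send(addr string, pkt []byte, lookup func(string)) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.known[addr] {
		fmt.Printf("send %q to %s\n", pkt, addr)
		return
	}
	s.pending[addr] = append(s.pending[addr], append([]byte(nil), pkt...))
	lookup(addr) // ask the network for the key, the answer arrives later
}

func (s *store) resolved(addr string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.known[addr] = true
	for _, pkt := range s.pending[addr] {
		fmt.Printf("flush %q to %s\n", pkt, addr)
	}
	delete(s.pending, addr)
}

func main() {
	s := &store{known: map[string]bool{}, pending: map[string][][]byte{}}
	s.send("200:1234::1", []byte("ping"), func(a string) { fmt.Println("lookup", a) })
	s.resolved("200:1234::1") // key arrives, buffered packet is flushed
}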
|
||||
|
||||
func (k *keyStore) update(key ed25519.PublicKey) *keyInfo {
|
||||
k.mutex.Lock()
|
||||
var kArray keyArray
|
||||
copy(kArray[:], key)
|
||||
var info *keyInfo
|
||||
if info = k.keyToInfo[kArray]; info == nil {
|
||||
info = new(keyInfo)
|
||||
info.key = kArray
|
||||
info.address = *address.AddrForKey(ed25519.PublicKey(info.key[:]))
|
||||
info.subnet = *address.SubnetForKey(ed25519.PublicKey(info.key[:]))
|
||||
var isOutgoing bool
|
||||
if k.addrBuffer[info.address] != nil {
|
||||
isOutgoing = true
|
||||
}
|
||||
if k.subnetBuffer[info.subnet] != nil {
|
||||
isOutgoing = true
|
||||
}
|
||||
if !k.tun.gatekeeper(key, isOutgoing) {
|
||||
// Blocked by the gatekeeper, so don't create an entry for this
|
||||
k.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
k.keyToInfo[info.key] = info
|
||||
k.addrToInfo[info.address] = info
|
||||
k.subnetToInfo[info.subnet] = info
|
||||
k.resetTimeout(info)
|
||||
k.mutex.Unlock()
|
||||
if buf := k.addrBuffer[info.address]; buf != nil {
|
||||
for _, bs := range buf.packets {
|
||||
k.tun.core.WriteTo(bs, iwt.Addr(info.key[:]))
|
||||
}
|
||||
delete(k.addrBuffer, info.address)
|
||||
}
|
||||
if buf := k.subnetBuffer[info.subnet]; buf != nil {
|
||||
for _, bs := range buf.packets {
|
||||
k.tun.core.WriteTo(bs, iwt.Addr(info.key[:]))
|
||||
}
|
||||
delete(k.subnetBuffer, info.subnet)
|
||||
}
|
||||
} else {
|
||||
k.resetTimeout(info)
|
||||
k.mutex.Unlock()
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func (k *keyStore) resetTimeout(info *keyInfo) {
|
||||
if info.timeout != nil {
|
||||
info.timeout.Stop()
|
||||
}
|
||||
info.timeout = time.AfterFunc(keyStoreTimeout, func() {
|
||||
k.mutex.Lock()
|
||||
defer k.mutex.Unlock()
|
||||
if nfo := k.keyToInfo[info.key]; nfo == info {
|
||||
delete(k.keyToInfo, info.key)
|
||||
}
|
||||
if nfo := k.addrToInfo[info.address]; nfo == info {
|
||||
delete(k.addrToInfo, info.address)
|
||||
}
|
||||
if nfo := k.subnetToInfo[info.subnet]; nfo == info {
|
||||
delete(k.subnetToInfo, info.subnet)
|
||||
}
|
||||
})
|
||||
}
|
189
src/tuntap/nodeinfo.go
Normal file
|
@ -0,0 +1,189 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
iwt "github.com/Arceliar/ironwood/types"
|
||||
"github.com/Arceliar/phony"
|
||||
|
||||
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
||||
)
|
||||
|
||||
// NodeInfoPayload represents a RequestNodeInfo response, in bytes.
|
||||
type NodeInfoPayload []byte
|
||||
|
||||
type nodeinfo struct {
|
||||
phony.Inbox
|
||||
proto *protoHandler
|
||||
myNodeInfo NodeInfoPayload
|
||||
callbacks map[keyArray]nodeinfoCallback
|
||||
}
|
||||
|
||||
type nodeinfoCallback struct {
|
||||
call func(nodeinfo NodeInfoPayload)
|
||||
created time.Time
|
||||
}
|
||||
|
||||
// Initialises the nodeinfo cache/callback maps, and starts a goroutine to keep
|
||||
// the cache/callback maps clean of stale entries
|
||||
func (m *nodeinfo) init(proto *protoHandler) {
|
||||
m.Act(nil, func() {
|
||||
m._init(proto)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _init(proto *protoHandler) {
|
||||
m.proto = proto
|
||||
m.callbacks = make(map[keyArray]nodeinfoCallback)
|
||||
m._cleanup()
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _cleanup() {
|
||||
for boxPubKey, callback := range m.callbacks {
|
||||
if time.Since(callback.created) > time.Minute {
|
||||
delete(m.callbacks, boxPubKey)
|
||||
}
|
||||
}
|
||||
time.AfterFunc(time.Second*30, func() {
|
||||
m.Act(nil, m._cleanup)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _addCallback(sender keyArray, call func(nodeinfo NodeInfoPayload)) {
|
||||
m.callbacks[sender] = nodeinfoCallback{
|
||||
created: time.Now(),
|
||||
call: call,
|
||||
}
|
||||
}
|
||||
|
||||
// Handles the callback, if there is one
|
||||
func (m *nodeinfo) _callback(sender keyArray, nodeinfo NodeInfoPayload) {
|
||||
if callback, ok := m.callbacks[sender]; ok {
|
||||
callback.call(nodeinfo)
|
||||
delete(m.callbacks, sender)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _getNodeInfo() NodeInfoPayload {
|
||||
return m.myNodeInfo
|
||||
}
|
||||
|
||||
// Set the current node's nodeinfo
|
||||
func (m *nodeinfo) setNodeInfo(given interface{}, privacy bool) (err error) {
|
||||
phony.Block(m, func() {
|
||||
err = m._setNodeInfo(given, privacy)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _setNodeInfo(given interface{}, privacy bool) error {
|
||||
defaults := map[string]interface{}{
|
||||
"buildname": version.BuildName(),
|
||||
"buildversion": version.BuildVersion(),
|
||||
"buildplatform": runtime.GOOS,
|
||||
"buildarch": runtime.GOARCH,
|
||||
}
|
||||
newnodeinfo := make(map[string]interface{})
|
||||
if !privacy {
|
||||
for k, v := range defaults {
|
||||
newnodeinfo[k] = v
|
||||
}
|
||||
}
|
||||
if nodeinfomap, ok := given.(map[string]interface{}); ok {
|
||||
for key, value := range nodeinfomap {
|
||||
if _, ok := defaults[key]; ok {
|
||||
if strvalue, strok := value.(string); strok && strings.EqualFold(strvalue, "null") || value == nil {
|
||||
delete(newnodeinfo, key)
|
||||
}
|
||||
continue
|
||||
}
|
||||
newnodeinfo[key] = value
|
||||
}
|
||||
}
|
||||
newjson, err := json.Marshal(newnodeinfo)
|
||||
if err == nil {
|
||||
if len(newjson) > 16384 {
|
||||
return errors.New("NodeInfo exceeds max length of 16384 bytes")
|
||||
}
|
||||
m.myNodeInfo = newjson
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
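
_setNodeInfo merges build-time defaults with the user-supplied map unless privacy is enabled, and lets a default key be suppressed by setting it to "null" (or nil). A trimmed, runnable sketch of that merge rule; the helper name and default values here are placeholders.

// Sketch: nodeinfo merge with opt-out of default fields.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func buildNodeInfo(given map[string]interface{}, privacy bool) ([]byte, error) {
	defaults := map[string]interface{}{
		"buildname":    "yggdrasil",
		"buildversion": "dev",
	}
	out := make(map[string]interface{})
	if !privacy {
		for k, v := range defaults {
			out[k] = v
		}
	}
	for k, v := range given {
		if _, isDefault := defaults[k]; isDefault {
			if s, ok := v.(string); (ok && strings.EqualFold(s, "null")) || v == nil {
				delete(out, k) // explicitly opt out of a default field
			}
			continue
		}
		out[k] = v
	}
	return json.Marshal(out)
}

func main() {
	bs, _ := buildNodeInfo(map[string]interface{}{"name": "node1", "buildname": "null"}, false)
	fmt.Println(string(bs)) // {"buildversion":"dev","name":"node1"}
}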
|
||||
|
||||
func (m *nodeinfo) sendReq(from phony.Actor, key keyArray, callback func(nodeinfo NodeInfoPayload)) {
|
||||
m.Act(from, func() {
|
||||
m._sendReq(key, callback)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _sendReq(key keyArray, callback func(nodeinfo NodeInfoPayload)) {
|
||||
if callback != nil {
|
||||
m._addCallback(key, callback)
|
||||
}
|
||||
m.proto.tun.core.WriteTo([]byte{typeSessionProto, typeProtoNodeInfoRequest}, iwt.Addr(key[:]))
|
||||
}
|
||||
|
||||
func (m *nodeinfo) handleReq(from phony.Actor, key keyArray) {
|
||||
m.Act(from, func() {
|
||||
m._sendRes(key)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) handleRes(from phony.Actor, key keyArray, info NodeInfoPayload) {
|
||||
m.Act(from, func() {
|
||||
m._callback(key, info)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _sendRes(key keyArray) {
|
||||
bs := append([]byte{typeSessionProto, typeProtoNodeInfoResponse}, m._getNodeInfo()...)
|
||||
m.proto.tun.core.WriteTo(bs, iwt.Addr(key[:]))
|
||||
}
|
||||
|
||||
// Admin socket stuff
|
||||
|
||||
type GetNodeInfoRequest struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
type GetNodeInfoResponse map[string]interface{}
|
||||
|
||||
func (m *nodeinfo) nodeInfoAdminHandler(in json.RawMessage) (interface{}, error) {
|
||||
var req GetNodeInfoRequest
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var key keyArray
|
||||
var kbs []byte
|
||||
var err error
|
||||
if kbs, err = hex.DecodeString(req.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(key[:], kbs)
|
||||
ch := make(chan []byte, 1)
|
||||
m.sendReq(nil, key, func(info NodeInfoPayload) {
|
||||
ch <- info
|
||||
})
|
||||
timer := time.NewTimer(6 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
return nil, errors.New("timeout")
|
||||
case info := <-ch:
|
||||
var msg json.RawMessage
|
||||
if err := msg.UnmarshalJSON(info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ip := net.IP(address.AddrForKey(kbs)[:])
|
||||
res := GetNodeInfoResponse{ip.String(): msg}
|
||||
return res, nil
|
||||
}
|
||||
}
|
349
src/tuntap/proto.go
Normal file
|
@ -0,0 +1,349 @@
|
|||
package tuntap
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
iwt "github.com/Arceliar/ironwood/types"
|
||||
"github.com/Arceliar/phony"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
)
|
||||
|
||||
const (
|
||||
typeDebugDummy = iota
|
||||
typeDebugGetSelfRequest
|
||||
typeDebugGetSelfResponse
|
||||
typeDebugGetPeersRequest
|
||||
typeDebugGetPeersResponse
|
||||
typeDebugGetDHTRequest
|
||||
typeDebugGetDHTResponse
|
||||
)
|
||||
|
||||
type reqInfo struct {
|
||||
callback func([]byte)
|
||||
timer *time.Timer // time.AfterFunc cleanup
|
||||
}
|
||||
|
||||
type protoHandler struct {
|
||||
phony.Inbox
|
||||
tun *TunAdapter
|
||||
nodeinfo nodeinfo
|
||||
sreqs map[keyArray]*reqInfo
|
||||
preqs map[keyArray]*reqInfo
|
||||
dreqs map[keyArray]*reqInfo
|
||||
}
|
||||
|
||||
func (p *protoHandler) init(tun *TunAdapter) {
|
||||
p.tun = tun
|
||||
p.nodeinfo.init(p)
|
||||
p.sreqs = make(map[keyArray]*reqInfo)
|
||||
p.preqs = make(map[keyArray]*reqInfo)
|
||||
p.dreqs = make(map[keyArray]*reqInfo)
|
||||
}
|
||||
|
||||
func (p *protoHandler) handleProto(from phony.Actor, key keyArray, bs []byte) {
|
||||
if len(bs) == 0 {
|
||||
return
|
||||
}
|
||||
switch bs[0] {
|
||||
case typeProtoDummy:
|
||||
case typeProtoNodeInfoRequest:
|
||||
p.nodeinfo.handleReq(p, key)
|
||||
case typeProtoNodeInfoResponse:
|
||||
p.nodeinfo.handleRes(p, key, bs[1:])
|
||||
case typeProtoDebug:
|
||||
p._handleDebug(key, bs[1:])
|
||||
}
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleDebug(key keyArray, bs []byte) {
|
||||
if len(bs) == 0 {
|
||||
return
|
||||
}
|
||||
switch bs[0] {
|
||||
case typeDebugDummy:
|
||||
case typeDebugGetSelfRequest:
|
||||
p._handleGetSelfRequest(key)
|
||||
case typeDebugGetSelfResponse:
|
||||
p._handleGetSelfResponse(key, bs[1:])
|
||||
case typeDebugGetPeersRequest:
|
||||
p._handleGetPeersRequest(key)
|
||||
case typeDebugGetPeersResponse:
|
||||
p._handleGetPeersResponse(key, bs[1:])
|
||||
case typeDebugGetDHTRequest:
|
||||
p._handleGetDHTRequest(key)
|
||||
case typeDebugGetDHTResponse:
|
||||
p._handleGetDHTResponse(key, bs[1:])
|
||||
}
|
||||
}
|
||||
|
||||
func (p *protoHandler) sendGetSelfRequest(key keyArray, callback func([]byte)) {
|
||||
p.Act(nil, func() {
|
||||
if info := p.sreqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
delete(p.sreqs, key)
|
||||
}
|
||||
info := new(reqInfo)
|
||||
info.callback = callback
|
||||
info.timer = time.AfterFunc(time.Minute, func() {
|
||||
p.Act(nil, func() {
|
||||
if p.sreqs[key] == info {
|
||||
delete(p.sreqs, key)
|
||||
}
|
||||
})
|
||||
})
|
||||
p.sreqs[key] = info
|
||||
p._sendDebug(key, typeDebugGetSelfRequest, nil)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetSelfRequest(key keyArray) {
|
||||
self := p.tun.core.GetSelf()
|
||||
res := map[string]string{
|
||||
"key": hex.EncodeToString(self.Key[:]),
|
||||
"coords": fmt.Sprintf("%v", self.Coords),
|
||||
}
|
||||
bs, err := json.Marshal(res) // FIXME this puts keys in base64, not hex
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
p._sendDebug(key, typeDebugGetSelfResponse, bs)
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetSelfResponse(key keyArray, bs []byte) {
|
||||
if info := p.sreqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
info.callback(bs)
|
||||
delete(p.sreqs, key)
|
||||
}
|
||||
}
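
Each debug request above is tracked with a callback plus a time.AfterFunc timer that drops the entry if no response arrives; a response stops the timer and fires the callback exactly once. The sketch below shows the same bookkeeping, but guarded by a mutex instead of the phony actor used in the real code, purely for brevity, and with illustrative names throughout.

// Sketch: pending request map with timer-based cleanup and single-fire callbacks.
package main

import (
	"fmt"
	"sync"
	"time"
)

type pending struct {
	callback func([]byte)
	timer    *time.Timer
}

type tracker struct {
	mutex sync.Mutex
	reqs  map[string]*pending
}

func (t *tracker) request(key string, callback func([]byte)) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	if old := t.reqs[key]; old != nil {
		old.timer.Stop() // replace any stale request for the same key
	}
	p := &pending{callback: callback}
	p.timer = time.AfterFunc(time.Minute, func() {
		t.mutex.Lock()
		defer t.mutex.Unlock()
		if t.reqs[key] == p {
			delete(t.reqs, key)
		}
	})
	t.reqs[key] = p
}

func (t *tracker) respond(key string, bs []byte) {
	t.mutex.Lock()
	p := t.reqs[key]
	delete(t.reqs, key)
	t.mutex.Unlock()
	if p != nil {
		p.timer.Stop()
		p.callback(bs)
	}
}

func main() {
	t := &tracker{reqs: map[string]*pending{}}
	t.request("peer-key", func(bs []byte) { fmt.Println("got:", string(bs)) })
	t.respond("peer-key", []byte(`{"coords":"[1 2]"}`))
}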
|
||||
|
||||
func (p *protoHandler) sendGetPeersRequest(key keyArray, callback func([]byte)) {
|
||||
p.Act(nil, func() {
|
||||
if info := p.preqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
delete(p.preqs, key)
|
||||
}
|
||||
info := new(reqInfo)
|
||||
info.callback = callback
|
||||
info.timer = time.AfterFunc(time.Minute, func() {
|
||||
p.Act(nil, func() {
|
||||
if p.preqs[key] == info {
|
||||
delete(p.preqs, key)
|
||||
}
|
||||
})
|
||||
})
|
||||
p.preqs[key] = info
|
||||
p._sendDebug(key, typeDebugGetPeersRequest, nil)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetPeersRequest(key keyArray) {
|
||||
peers := p.tun.core.GetPeers()
|
||||
var bs []byte
|
||||
for _, pinfo := range peers {
|
||||
tmp := append(bs, pinfo.Key[:]...)
|
||||
const responseOverhead = 2 // 1 debug type, 1 getpeers type
|
||||
if uint64(len(tmp))+responseOverhead > p.tun.maxSessionMTU() {
|
||||
break
|
||||
}
|
||||
bs = tmp
|
||||
}
|
||||
p._sendDebug(key, typeDebugGetPeersResponse, bs)
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetPeersResponse(key keyArray, bs []byte) {
|
||||
if info := p.preqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
info.callback(bs)
|
||||
delete(p.preqs, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *protoHandler) sendGetDHTRequest(key keyArray, callback func([]byte)) {
|
||||
p.Act(nil, func() {
|
||||
if info := p.dreqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
delete(p.dreqs, key)
|
||||
}
|
||||
info := new(reqInfo)
|
||||
info.callback = callback
|
||||
info.timer = time.AfterFunc(time.Minute, func() {
|
||||
p.Act(nil, func() {
|
||||
if p.dreqs[key] == info {
|
||||
delete(p.dreqs, key)
|
||||
}
|
||||
})
|
||||
})
|
||||
p.dreqs[key] = info
|
||||
p._sendDebug(key, typeDebugGetDHTRequest, nil)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetDHTRequest(key keyArray) {
|
||||
dinfos := p.tun.core.GetDHT()
|
||||
var bs []byte
|
||||
for _, dinfo := range dinfos {
|
||||
tmp := append(bs, dinfo.Key[:]...)
|
||||
const responseOverhead = 2 // 1 debug type, 1 getdht type
|
||||
if uint64(len(tmp))+responseOverhead > p.tun.maxSessionMTU() {
|
||||
break
|
||||
}
|
||||
bs = tmp
|
||||
}
|
||||
p._sendDebug(key, typeDebugGetDHTResponse, bs)
|
||||
}
|
||||
|
||||
func (p *protoHandler) _handleGetDHTResponse(key keyArray, bs []byte) {
|
||||
if info := p.dreqs[key]; info != nil {
|
||||
info.timer.Stop()
|
||||
info.callback(bs)
|
||||
delete(p.dreqs, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *protoHandler) _sendDebug(key keyArray, dType uint8, data []byte) {
|
||||
bs := append([]byte{typeSessionProto, typeProtoDebug, dType}, data...)
|
||||
p.tun.core.WriteTo(bs, iwt.Addr(key[:]))
|
||||
}
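
_sendDebug produces a fixed three-byte header ahead of the payload: the in-band session type, the protocol type, and the debug subtype. The sketch below builds and parses that layout; typeSessionProto and typeProtoDebug match the constants in src/tuntap/types.go further down, while the typeDebugGetSelfRequest value is assumed for illustration only.

package main

import "fmt"

const (
	typeSessionProto        = 2   // in-band: protocol traffic (from src/tuntap/types.go)
	typeProtoDebug          = 255 // protocol: debug message (from src/tuntap/types.go)
	typeDebugGetSelfRequest = 1   // assumed subtype value, for illustration only
)

// buildDebug prefixes a payload with the session/proto/debug header bytes.
func buildDebug(dType uint8, payload []byte) []byte {
	return append([]byte{typeSessionProto, typeProtoDebug, dType}, payload...)
}

// parseDebug strips the header and returns the subtype and payload.
func parseDebug(bs []byte) (uint8, []byte, bool) {
	if len(bs) < 3 || bs[0] != typeSessionProto || bs[1] != typeProtoDebug {
		return 0, nil, false
	}
	return bs[2], bs[3:], true
}

func main() {
	pkt := buildDebug(typeDebugGetSelfRequest, nil)
	dType, payload, ok := parseDebug(pkt)
	fmt.Println(dType, payload, ok)
}
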
|
||||
|
||||
// Admin socket stuff
|
||||
|
||||
type DebugGetSelfRequest struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
type DebugGetSelfResponse map[string]interface{}
|
||||
|
||||
func (p *protoHandler) getSelfHandler(in json.RawMessage) (interface{}, error) {
|
||||
var req DebugGetSelfRequest
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var key keyArray
|
||||
var kbs []byte
|
||||
var err error
|
||||
if kbs, err = hex.DecodeString(req.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(key[:], kbs)
|
||||
ch := make(chan []byte, 1)
|
||||
p.sendGetSelfRequest(key, func(info []byte) {
|
||||
ch <- info
|
||||
})
|
||||
timer := time.NewTimer(6 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
return nil, errors.New("timeout")
|
||||
case info := <-ch:
|
||||
var msg json.RawMessage
|
||||
if err := msg.UnmarshalJSON(info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ip := net.IP(address.AddrForKey(kbs)[:])
|
||||
res := DebugGetSelfResponse{ip.String(): msg}
|
||||
return res, nil
|
||||
}
|
||||
}
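
For reference, a sketch of the JSON shapes this admin handler exchanges: the request carries a hex-encoded public key, and the response is a map keyed by the IPv6 address derived from that key. The key and address values below are placeholders, not real node data.

package main

import (
	"encoding/json"
	"fmt"
)

// These mirror the admin-socket types defined above.
type DebugGetSelfRequest struct {
	Key string `json:"key"`
}

type DebugGetSelfResponse map[string]interface{}

func main() {
	// Request: a 32-byte public key, hex encoded (all zeroes here as a placeholder).
	req, _ := json.Marshal(DebugGetSelfRequest{
		Key: "0000000000000000000000000000000000000000000000000000000000000000",
	})
	fmt.Println(string(req))

	// Response: keyed by the node's derived IPv6 address (example address and values).
	res := DebugGetSelfResponse{
		"200:1111:2222:3333:4444:5555:6666:7777": map[string]string{
			"key":    "0000000000000000000000000000000000000000000000000000000000000000",
			"coords": "[1 2 3]",
		},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out))
}
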
|
||||
|
||||
type DebugGetPeersRequest struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
type DebugGetPeersResponse map[string]interface{}
|
||||
|
||||
func (p *protoHandler) getPeersHandler(in json.RawMessage) (interface{}, error) {
|
||||
var req DebugGetPeersRequest
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var key keyArray
|
||||
var kbs []byte
|
||||
var err error
|
||||
if kbs, err = hex.DecodeString(req.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(key[:], kbs)
|
||||
ch := make(chan []byte, 1)
|
||||
p.sendGetPeersRequest(key, func(info []byte) {
|
||||
ch <- info
|
||||
})
|
||||
timer := time.NewTimer(6 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
return nil, errors.New("timeout")
|
||||
case info := <-ch:
|
||||
ks := make(map[string][]string)
|
||||
bs := info
|
||||
for len(bs) >= len(key) {
|
||||
ks["keys"] = append(ks["keys"], hex.EncodeToString(bs[:len(key)]))
|
||||
bs = bs[len(key):]
|
||||
}
|
||||
js, err := json.Marshal(ks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var msg json.RawMessage
|
||||
if err := msg.UnmarshalJSON(js); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ip := net.IP(address.AddrForKey(kbs)[:])
|
||||
res := DebugGetPeersResponse{ip.String(): msg}
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
type DebugGetDHTRequest struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
type DebugGetDHTResponse map[string]interface{}
|
||||
|
||||
func (p *protoHandler) getDHTHandler(in json.RawMessage) (interface{}, error) {
|
||||
var req DebugGetDHTRequest
|
||||
if err := json.Unmarshal(in, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var key keyArray
|
||||
var kbs []byte
|
||||
var err error
|
||||
if kbs, err = hex.DecodeString(req.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(key[:], kbs)
|
||||
ch := make(chan []byte, 1)
|
||||
p.sendGetDHTRequest(key, func(info []byte) {
|
||||
ch <- info
|
||||
})
|
||||
timer := time.NewTimer(6 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
return nil, errors.New("timeout")
|
||||
case info := <-ch:
|
||||
ks := make(map[string][]string)
|
||||
bs := info
|
||||
for len(bs) >= len(key) {
|
||||
ks["keys"] = append(ks["keys"], hex.EncodeToString(bs[:len(key)]))
|
||||
bs = bs[len(key):]
|
||||
}
|
||||
js, err := json.Marshal(ks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var msg json.RawMessage
|
||||
if err := msg.UnmarshalJSON(js); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ip := net.IP(address.AddrForKey(kbs)[:])
|
||||
res := DebugGetDHTResponse{ip.String(): msg}
|
||||
return res, nil
|
||||
}
|
||||
}
|
|
@ -9,7 +9,7 @@ package tuntap
|
|||
// TODO: Don't block in reader on writes that are pending searches
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"crypto/ed25519"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
@ -22,51 +22,42 @@ import (
|
|||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/types"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
)
|
||||
|
||||
type MTU = types.MTU
|
||||
|
||||
const tun_IPv6_HEADER_LENGTH = 40
|
||||
type MTU uint16
|
||||
|
||||
// TunAdapter represents a running TUN interface and extends the
|
||||
// yggdrasil.Adapter type. In order to use the TUN adapter with Yggdrasil, you
|
||||
// should pass this object to the yggdrasil.SetRouterAdapter() function before
|
||||
// calling yggdrasil.Start().
|
||||
type TunAdapter struct {
|
||||
core *yggdrasil.Core
|
||||
writer tunWriter
|
||||
reader tunReader
|
||||
core *core.Core
|
||||
store keyStore
|
||||
config *config.NodeState
|
||||
log *log.Logger
|
||||
reconfigure chan chan error
|
||||
listener *yggdrasil.Listener
|
||||
dialer *yggdrasil.Dialer
|
||||
addr address.Address
|
||||
subnet address.Subnet
|
||||
ckr cryptokey
|
||||
icmpv6 ICMPv6
|
||||
mtu MTU
|
||||
mtu uint64
|
||||
iface tun.Device
|
||||
phony.Inbox // Currently only used for _handlePacket from the reader, TODO: all the stuff that currently needs a mutex below
|
||||
//mutex sync.RWMutex // Protects the below
|
||||
addrToConn map[address.Address]*tunConn
|
||||
subnetToConn map[address.Subnet]*tunConn
|
||||
dials map[string][][]byte // Buffer of packets to send after dialing finishes
|
||||
isOpen bool
|
||||
isEnabled bool // Used by the writer to drop sessionTraffic if not enabled
|
||||
gatekeeper func(pubkey ed25519.PublicKey, initiator bool) bool
|
||||
proto protoHandler
|
||||
}
|
||||
|
||||
type TunOptions struct {
|
||||
Listener *yggdrasil.Listener
|
||||
Dialer *yggdrasil.Dialer
|
||||
func (tun *TunAdapter) SetSessionGatekeeper(gatekeeper func(pubkey ed25519.PublicKey, initiator bool) bool) {
|
||||
phony.Block(tun, func() {
|
||||
tun.gatekeeper = gatekeeper
|
||||
})
|
||||
}
|
||||
|
||||
// Gets the maximum supported MTU for the platform based on the defaults in
|
||||
// defaults.GetDefaults().
|
||||
func getSupportedMTU(mtu MTU) MTU {
|
||||
func getSupportedMTU(mtu uint64) uint64 {
|
||||
if mtu < 1280 {
|
||||
return 1280
|
||||
}
|
||||
|
@ -88,7 +79,7 @@ func (tun *TunAdapter) Name() string {
|
|||
// MTU gets the adapter's MTU. This can range between 1280 and 65535, although
|
||||
// the maximum value is determined by your platform. The returned value will
|
||||
// never exceed that of MaximumMTU().
|
||||
func (tun *TunAdapter) MTU() MTU {
|
||||
func (tun *TunAdapter) MTU() uint64 {
|
||||
return getSupportedMTU(tun.mtu)
|
||||
}
|
||||
|
||||
|
@ -99,34 +90,29 @@ func DefaultName() string {
|
|||
|
||||
// DefaultMTU gets the default TUN interface MTU for your platform. This can
|
||||
// be as high as MaximumMTU(), depending on platform, but is never lower than 1280.
|
||||
func DefaultMTU() MTU {
|
||||
func DefaultMTU() uint64 {
|
||||
return defaults.GetDefaults().DefaultIfMTU
|
||||
}
|
||||
|
||||
// MaximumMTU returns the maximum supported TUN interface MTU for your
|
||||
// platform. This can be as high as 65535, depending on platform, but is never
|
||||
// lower than 1280.
|
||||
func MaximumMTU() MTU {
|
||||
func MaximumMTU() uint64 {
|
||||
return defaults.GetDefaults().MaximumIfMTU
|
||||
}
|
||||
|
||||
// Init initialises the TUN module. You must have acquired a Listener from
|
||||
// the Yggdrasil core before this point and it must not be in use elsewhere.
|
||||
func (tun *TunAdapter) Init(core *yggdrasil.Core, config *config.NodeState, log *log.Logger, options interface{}) error {
|
||||
tunoptions, ok := options.(TunOptions)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid options supplied to TunAdapter module")
|
||||
}
|
||||
func (tun *TunAdapter) Init(core *core.Core, config *config.NodeState, log *log.Logger, options interface{}) error {
|
||||
tun.core = core
|
||||
tun.store.init(tun)
|
||||
tun.config = config
|
||||
tun.log = log
|
||||
tun.listener = tunoptions.Listener
|
||||
tun.dialer = tunoptions.Dialer
|
||||
tun.addrToConn = make(map[address.Address]*tunConn)
|
||||
tun.subnetToConn = make(map[address.Subnet]*tunConn)
|
||||
tun.dials = make(map[string][][]byte)
|
||||
tun.writer.tun = tun
|
||||
tun.reader.tun = tun
|
||||
tun.proto.init(tun)
|
||||
tun.proto.nodeinfo.setNodeInfo(config.Current.NodeInfo, config.Current.NodeInfoPrivacy)
|
||||
if err := tun.core.SetOutOfBandHandler(tun.oobHandler); err != nil {
|
||||
return fmt.Errorf("tun.core.SetOutOfBandHandler: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -145,34 +131,34 @@ func (tun *TunAdapter) _start() error {
|
|||
return errors.New("TUN module is already started")
|
||||
}
|
||||
current := tun.config.GetCurrent()
|
||||
if tun.config == nil || tun.listener == nil || tun.dialer == nil {
|
||||
if tun.config == nil {
|
||||
return errors.New("no configuration available to TUN")
|
||||
}
|
||||
var boxPub crypto.BoxPubKey
|
||||
boxPubHex, err := hex.DecodeString(current.EncryptionPublicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copy(boxPub[:], boxPubHex)
|
||||
nodeID := crypto.GetNodeID(&boxPub)
|
||||
tun.addr = *address.AddrForNodeID(nodeID)
|
||||
tun.subnet = *address.SubnetForNodeID(nodeID)
|
||||
sk := tun.core.PrivateKey()
|
||||
pk := sk.Public().(ed25519.PublicKey)
|
||||
tun.addr = *address.AddrForKey(pk)
|
||||
tun.subnet = *address.SubnetForKey(pk)
|
||||
addr := fmt.Sprintf("%s/%d", net.IP(tun.addr[:]).String(), 8*len(address.GetPrefix())-1)
|
||||
if current.IfName == "none" || current.IfName == "dummy" {
|
||||
tun.log.Debugln("Not starting TUN as ifname is none or dummy")
|
||||
tun.isEnabled = false
|
||||
go tun.write()
|
||||
return nil
|
||||
}
|
||||
if err := tun.setup(current.IfName, addr, current.IfMTU); err != nil {
|
||||
mtu := current.IfMTU
|
||||
if tun.maxSessionMTU() < mtu {
|
||||
mtu = tun.maxSessionMTU()
|
||||
}
|
||||
if err := tun.setup(current.IfName, addr, mtu); err != nil {
|
||||
return err
|
||||
}
|
||||
if tun.MTU() != current.IfMTU {
|
||||
if tun.MTU() != mtu {
|
||||
tun.log.Warnf("Warning: Interface MTU %d automatically adjusted to %d (supported range is 1280-%d)", current.IfMTU, tun.MTU(), MaximumMTU())
|
||||
}
|
||||
tun.core.SetMaximumSessionMTU(tun.MTU())
|
||||
tun.isOpen = true
|
||||
go tun.handler()
|
||||
tun.reader.Act(nil, tun.reader._read) // Start the reader
|
||||
tun.ckr.init(tun)
|
||||
tun.isEnabled = true
|
||||
go tun.read()
|
||||
go tun.write()
|
||||
return nil
|
||||
}
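
The MTU handling in _start above boils down to: take the configured IfMTU, cap it by the core's session MTU less one byte of session-type overhead, and never go below the IPv6 minimum of 1280 (which getSupportedMTU enforces). A minimal sketch of that calculation, with the clampIfMTU name chosen for illustration:

package main

import "fmt"

const sessionTypeOverhead = 1 // one byte of in-band session type, as in maxSessionMTU

// clampIfMTU applies the same bounds as _start and getSupportedMTU.
func clampIfMTU(configured, coreMTU uint64) uint64 {
	mtu := configured
	if limit := coreMTU - sessionTypeOverhead; limit < mtu {
		mtu = limit
	}
	if mtu < 1280 {
		mtu = 1280 // IPv6 minimum
	}
	return mtu
}

func main() {
	fmt.Println(clampIfMTU(65535, 1500)) // capped by the session MTU: 1499
	fmt.Println(clampIfMTU(1000, 65535)) // raised to the IPv6 minimum: 1280
}
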
|
||||
|
||||
|
@ -205,79 +191,41 @@ func (tun *TunAdapter) _stop() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// UpdateConfig updates the TUN module with the provided config.NodeConfig
|
||||
// and then signals the various module goroutines to reconfigure themselves if
|
||||
// needed.
|
||||
func (tun *TunAdapter) UpdateConfig(config *config.NodeConfig) {
|
||||
tun.log.Debugln("Reloading TUN configuration...")
|
||||
|
||||
// Replace the active configuration with the supplied one
|
||||
tun.config.Replace(*config)
|
||||
|
||||
// If the MTU has changed in the TUN module then this is where we would
|
||||
// tell the router so that updated session pings can be sent. However, we
|
||||
// don't currently update the MTU of the adapter once it has been created so
|
||||
// this doesn't actually happen in the real world yet.
|
||||
// tun.core.SetMaximumSessionMTU(...)
|
||||
|
||||
// Notify children about the configuration change
|
||||
tun.Act(nil, tun.ckr.configure)
|
||||
}
|
||||
|
||||
func (tun *TunAdapter) handler() error {
|
||||
for {
|
||||
// Accept the incoming connection
|
||||
conn, err := tun.listener.Accept()
|
||||
if err != nil {
|
||||
tun.log.Errorln("TUN connection accept error:", err)
|
||||
return err
|
||||
func (tun *TunAdapter) oobHandler(fromKey, toKey ed25519.PublicKey, data []byte) {
|
||||
if len(data) != 1+ed25519.SignatureSize {
|
||||
return
|
||||
}
|
||||
phony.Block(tun, func() {
|
||||
if _, err := tun._wrap(conn.(*yggdrasil.Conn)); err != nil {
|
||||
// Something went wrong when storing the connection, typically that
|
||||
// something already exists for this address or subnet
|
||||
tun.log.Debugln("TUN handler wrap:", err)
|
||||
sig := data[1:]
|
||||
switch data[0] {
|
||||
case typeKeyLookup:
|
||||
snet := *address.SubnetForKey(toKey)
|
||||
if snet == tun.subnet && ed25519.Verify(fromKey, toKey[:], sig) {
|
||||
// This is looking for at least our subnet (possibly our address)
|
||||
// Send a response
|
||||
tun.sendKeyResponse(fromKey)
|
||||
}
|
||||
case typeKeyResponse:
|
||||
// TODO keep a list of something to match against...
|
||||
// Ignore the response if it doesn't match anything of interest...
|
||||
if ed25519.Verify(fromKey, toKey[:], sig) {
|
||||
tun.store.update(fromKey)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (tun *TunAdapter) _wrap(conn *yggdrasil.Conn) (c *tunConn, err error) {
|
||||
// Prepare a session wrapper for the given connection
|
||||
s := tunConn{
|
||||
tun: tun,
|
||||
conn: conn,
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
c = &s
|
||||
// Get the remote address and subnet of the other side
|
||||
remotePubKey := conn.RemoteAddr().(*crypto.BoxPubKey)
|
||||
remoteNodeID := crypto.GetNodeID(remotePubKey)
|
||||
s.addr = *address.AddrForNodeID(remoteNodeID)
|
||||
s.snet = *address.SubnetForNodeID(remoteNodeID)
|
||||
// Work out if this is already a destination we already know about
|
||||
atc, aok := tun.addrToConn[s.addr]
|
||||
stc, sok := tun.subnetToConn[s.snet]
|
||||
// If we know about a connection for this destination already then assume it
|
||||
// is no longer valid and close it
|
||||
if aok {
|
||||
atc._close_from_tun()
|
||||
err = errors.New("replaced connection for address")
|
||||
} else if sok {
|
||||
stc._close_from_tun()
|
||||
err = errors.New("replaced connection for subnet")
|
||||
}
|
||||
// Save the session wrapper so that we can look it up quickly next time
|
||||
// we receive a packet through the interface for this address
|
||||
tun.addrToConn[s.addr] = &s
|
||||
tun.subnetToConn[s.snet] = &s
|
||||
// Set the read callback and start the timeout
|
||||
conn.SetReadCallback(func(bs []byte) {
|
||||
s.Act(conn, func() {
|
||||
s._read(bs)
|
||||
})
|
||||
})
|
||||
s.Act(nil, s.stillAlive)
|
||||
// Return
|
||||
return c, err
|
||||
func (tun *TunAdapter) sendKeyLookup(partial ed25519.PublicKey) {
|
||||
sig := ed25519.Sign(tun.core.PrivateKey(), partial[:])
|
||||
bs := append([]byte{typeKeyLookup}, sig...)
|
||||
tun.core.SendOutOfBand(partial, bs)
|
||||
}
|
||||
|
||||
func (tun *TunAdapter) sendKeyResponse(dest ed25519.PublicKey) {
|
||||
sig := ed25519.Sign(tun.core.PrivateKey(), dest[:])
|
||||
bs := append([]byte{typeKeyResponse}, sig...)
|
||||
tun.core.SendOutOfBand(dest, bs)
|
||||
}
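
The out-of-band exchange above works by signing the destination key: a lookup carries a signature over the (partial) target key made with the sender's private key, and the receiver only answers, or accepts a response, if that signature verifies against the claimed sender. A self-contained sketch under those assumptions, reusing the typeKeyLookup value from src/tuntap/types.go:

package main

import (
	"crypto/ed25519"
	"fmt"
)

const typeKeyLookup = 1 // matches the out-of-band constant in src/tuntap/types.go

// buildKeyLookup signs the (partial) destination key with the sender's private key.
func buildKeyLookup(priv ed25519.PrivateKey, partial ed25519.PublicKey) []byte {
	sig := ed25519.Sign(priv, partial)
	return append([]byte{typeKeyLookup}, sig...)
}

// verifyKeyLookup checks that the lookup came from fromKey and targets toKey.
func verifyKeyLookup(fromKey, toKey ed25519.PublicKey, msg []byte) bool {
	if len(msg) != 1+ed25519.SignatureSize || msg[0] != typeKeyLookup {
		return false
	}
	return ed25519.Verify(fromKey, toKey, msg[1:])
}

func main() {
	senderPub, senderPriv, _ := ed25519.GenerateKey(nil)
	targetPub, _, _ := ed25519.GenerateKey(nil)
	msg := buildKeyLookup(senderPriv, targetPub)
	fmt.Println(verifyKeyLookup(senderPub, targetPub, msg)) // true
}
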
|
||||
|
||||
func (tun *TunAdapter) maxSessionMTU() uint64 {
|
||||
const sessionTypeOverhead = 1
|
||||
return tun.core.MTU() - sessionTypeOverhead
|
||||
}
|
||||
|
|
|
@ -73,14 +73,14 @@ type in6_ifreq_lifetime struct {
|
|||
}
|
||||
|
||||
// Configures the TUN adapter with the correct IPv6 address and MTU.
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
|
||||
iface, err := wgtun.CreateTUN(ifname, int(mtu))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tun.iface = iface
|
||||
if mtu, err := iface.MTU(); err == nil {
|
||||
tun.mtu = getSupportedMTU(MTU(mtu))
|
||||
tun.mtu = getSupportedMTU(uint64(mtu))
|
||||
} else {
|
||||
tun.mtu = 0
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
)
|
||||
|
||||
// Configures the "utun" adapter with the correct IPv6 address and MTU.
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
|
||||
if ifname == "auto" {
|
||||
ifname = "utun"
|
||||
}
|
||||
|
@ -25,8 +25,8 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
|||
panic(err)
|
||||
}
|
||||
tun.iface = iface
|
||||
if mtu, err := iface.MTU(); err == nil {
|
||||
tun.mtu = getSupportedMTU(MTU(mtu))
|
||||
if m, err := iface.MTU(); err == nil {
|
||||
tun.mtu = getSupportedMTU(uint64(m))
|
||||
} else {
|
||||
tun.mtu = 0
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
)
|
||||
|
||||
// Configures the TUN adapter with the correct IPv6 address and MTU.
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
|
||||
if ifname == "auto" {
|
||||
ifname = "\000"
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
|||
}
|
||||
tun.iface = iface
|
||||
if mtu, err := iface.MTU(); err == nil {
|
||||
tun.mtu = getSupportedMTU(MTU(mtu))
|
||||
tun.mtu = getSupportedMTU(uint64(mtu))
|
||||
} else {
|
||||
tun.mtu = 0
|
||||
}
|
||||
|
|
|
@ -10,14 +10,14 @@ import (
|
|||
)
|
||||
|
||||
// Configures the TUN adapter with the correct IPv6 address and MTU.
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu int) error {
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
|
||||
iface, err := wgtun.CreateTUN(ifname, mtu)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tun.iface = iface
|
||||
if mtu, err := iface.MTU(); err == nil {
|
||||
tun.mtu = getSupportedMTU(mtu)
|
||||
tun.mtu = getSupportedMTU(uint64(mtu))
|
||||
} else {
|
||||
tun.mtu = 0
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ import (
|
|||
// This is to catch Windows platforms
|
||||
|
||||
// Configures the TUN adapter with the correct IPv6 address and MTU.
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
||||
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
|
||||
if ifname == "auto" {
|
||||
ifname = defaults.GetDefaults().DefaultIfName
|
||||
}
|
||||
|
@ -43,14 +43,14 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
|
|||
return err
|
||||
}
|
||||
if mtu, err := iface.MTU(); err == nil {
|
||||
tun.mtu = MTU(mtu)
|
||||
tun.mtu = uint64(mtu)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Sets the MTU of the TAP adapter.
|
||||
func (tun *TunAdapter) setupMTU(mtu MTU) error {
|
||||
func (tun *TunAdapter) setupMTU(mtu uint64) error {
|
||||
if tun.iface == nil || tun.Name() == "" {
|
||||
return errors.New("Can't configure MTU as TUN adapter is not present")
|
||||
}
|
||||
|
|
23
src/tuntap/types.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
package tuntap
|
||||
|
||||
// Out-of-band packet types
|
||||
const (
|
||||
typeKeyDummy = iota
|
||||
typeKeyLookup
|
||||
typeKeyResponse
|
||||
)
|
||||
|
||||
// In-band packet types
|
||||
const (
|
||||
typeSessionDummy = iota
|
||||
typeSessionTraffic
|
||||
typeSessionProto
|
||||
)
|
||||
|
||||
// Protocol packet types
|
||||
const (
|
||||
typeProtoDummy = iota
|
||||
typeProtoNodeInfoRequest
|
||||
typeProtoNodeInfoResponse
|
||||
typeProtoDebug = 255
|
||||
)
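
As a rough illustration of how the in-band constants above are used, the first byte of a session packet selects between ordinary traffic (handed to the TUN device) and protocol traffic (handed to the protoHandler). The dispatch below is a simplified stand-in, not the actual reader code.

package main

import "fmt"

const (
	typeSessionDummy = iota
	typeSessionTraffic
	typeSessionProto
)

// dispatchSession routes an in-band packet on its first byte (simplified stand-in).
func dispatchSession(bs []byte) {
	if len(bs) == 0 {
		return
	}
	switch bs[0] {
	case typeSessionTraffic:
		fmt.Println("traffic:", len(bs)-1, "payload bytes for the TUN device")
	case typeSessionProto:
		fmt.Println("protocol:", len(bs)-1, "payload bytes for the proto handler")
	default:
		// Dummy or unknown types are dropped.
	}
}

func main() {
	dispatchSession([]byte{typeSessionTraffic, 0x60}) // 0x60: first byte of an IPv6 header
	dispatchSession([]byte{typeSessionProto, 255})    // 255: a debug protocol packet
}
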
|
|
@ -1,3 +0,0 @@
|
|||
package types
|
||||
|
||||
type MTU uint16
|
|
@ -1,21 +0,0 @@
|
|||
//+build mobile
|
||||
|
||||
package util
|
||||
|
||||
import "runtime/debug"
|
||||
|
||||
func init() {
|
||||
debug.SetGCPercent(25)
|
||||
}
|
||||
|
||||
// GetBytes always returns a nil slice on mobile platforms.
|
||||
func GetBytes() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBytes does literally nothing on mobile platforms.
|
||||
// This is done rather than keeping a free list of bytes on platforms with memory constraints.
|
||||
// It's needed to help keep memory usage low enough to fall under the limits set for e.g. iOS NEPacketTunnelProvider apps.
|
||||
func PutBytes(bs []byte) {
|
||||
return
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
//+build !mobile
|
||||
|
||||
package util
|
||||
|
||||
import "sync"
|
||||
|
||||
// This is used to buffer recently used slices of bytes, to prevent allocations in the hot loops.
|
||||
var byteStore = sync.Pool{New: func() interface{} { return []byte(nil) }}
|
||||
|
||||
// GetBytes returns a 0-length (possibly nil) slice of bytes from a free list, so it may have a larger capacity.
|
||||
func GetBytes() []byte {
|
||||
return byteStore.Get().([]byte)[:0]
|
||||
}
|
||||
|
||||
// PutBytes stores a slice in a free list, where it can potentially be reused to prevent future allocations.
|
||||
func PutBytes(bs []byte) {
|
||||
byteStore.Put(bs)
|
||||
}
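
A short usage sketch for the free list above (non-mobile builds): take a zero-length slice, append into it, then return it so a later GetBytes call can reuse the backing array.

package main

import (
	"fmt"
	"sync"
)

// Same shape as the free list above.
var byteStore = sync.Pool{New: func() interface{} { return []byte(nil) }}

func GetBytes() []byte { return byteStore.Get().([]byte)[:0] }

func PutBytes(bs []byte) { byteStore.Put(bs) }

func main() {
	buf := GetBytes()
	buf = append(buf, []byte("hello")...)
	fmt.Println(len(buf), cap(buf)) // length 5; capacity may be larger after reuse
	PutBytes(buf)                   // hand the backing array back for later GetBytes calls
}
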
|
|
@ -1,104 +0,0 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cancellation is used to signal when things should shut down, such as signaling anything associated with a Conn to exit.
|
||||
// It is similar to a context, but with an error to specify the reason for the cancellation.
|
||||
type Cancellation interface {
|
||||
Finished() <-chan struct{} // Finished returns a channel which will be closed when Cancellation.Cancel is first called.
|
||||
Cancel(error) error // Cancel closes the channel returned by Finished and sets the error returned by Error(), or else returns the existing error if the Cancellation has already run.
|
||||
Error() error // Error returns the error provided to Cancel, or nil if no error has been provided.
|
||||
}
|
||||
|
||||
// CancellationFinalized is an error returned if a cancellation object was garbage collected and the finalizer was run.
|
||||
// If you ever see this, then you're probably doing something wrong with your code.
|
||||
var CancellationFinalized = errors.New("finalizer called")
|
||||
|
||||
// CancellationTimeoutError is used when a CancellationWithTimeout or CancellationWithDeadline is cancelled due to said timeout.
|
||||
var CancellationTimeoutError = errors.New("timeout")
|
||||
|
||||
// CancellationFinalizer is set as a finalizer when creating a new cancellation with NewCancellation(), and generally shouldn't be needed by the user, but is included in case other implementations of the same interface want to make use of it.
|
||||
func CancellationFinalizer(c Cancellation) {
|
||||
c.Cancel(CancellationFinalized)
|
||||
}
|
||||
|
||||
type cancellation struct {
|
||||
cancel chan struct{}
|
||||
mutex sync.RWMutex
|
||||
err error
|
||||
done bool
|
||||
}
|
||||
|
||||
// NewCancellation returns a pointer to a struct satisfying the Cancellation interface.
|
||||
func NewCancellation() Cancellation {
|
||||
c := cancellation{
|
||||
cancel: make(chan struct{}),
|
||||
}
|
||||
runtime.SetFinalizer(&c, CancellationFinalizer)
|
||||
return &c
|
||||
}
|
||||
|
||||
// Finished returns a channel which will be closed when Cancellation.Cancel is first called.
|
||||
func (c *cancellation) Finished() <-chan struct{} {
|
||||
return c.cancel
|
||||
}
|
||||
|
||||
// Cancel closes the channel returned by Finished and sets the error returned by Error(), or else returns the existing error if the Cancellation has already run.
|
||||
func (c *cancellation) Cancel(err error) error {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
if c.done {
|
||||
return c.err
|
||||
} else {
|
||||
c.err = err
|
||||
c.done = true
|
||||
close(c.cancel)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns the error provided to Cancel, or nil if no error has been provided.
|
||||
func (c *cancellation) Error() error {
|
||||
c.mutex.RLock()
|
||||
err := c.err
|
||||
c.mutex.RUnlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// CancellationChild returns a new Cancellation which can be Cancelled independently of the parent, but which will also be Cancelled if the parent is Cancelled first.
|
||||
func CancellationChild(parent Cancellation) Cancellation {
|
||||
child := NewCancellation()
|
||||
go func() {
|
||||
select {
|
||||
case <-child.Finished():
|
||||
case <-parent.Finished():
|
||||
child.Cancel(parent.Error())
|
||||
}
|
||||
}()
|
||||
return child
|
||||
}
|
||||
|
||||
// CancellationWithTimeout returns a ChildCancellation that will automatically be Cancelled with a CancellationTimeoutError after the timeout.
|
||||
func CancellationWithTimeout(parent Cancellation, timeout time.Duration) Cancellation {
|
||||
child := CancellationChild(parent)
|
||||
go func() {
|
||||
timer := time.NewTimer(timeout)
|
||||
defer TimerStop(timer)
|
||||
select {
|
||||
case <-child.Finished():
|
||||
case <-timer.C:
|
||||
child.Cancel(CancellationTimeoutError)
|
||||
}
|
||||
}()
|
||||
return child
|
||||
}
|
||||
|
||||
// CancellationWithDeadline returns a ChildCancellation that will automatically be Cancelled with a CancellationTimeoutError at the specified deadline.
|
||||
func CancellationWithDeadline(parent Cancellation, deadline time.Time) Cancellation {
|
||||
return CancellationWithTimeout(parent, deadline.Sub(time.Now()))
|
||||
}
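
A usage sketch for the Cancellation helpers above, which this changeset removes. It assumes the old github.com/yggdrasil-network/yggdrasil-go/src/util package is still importable; otherwise read it as pseudocode against the interface documented above.

package main

import (
	"fmt"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	parent := util.NewCancellation()
	// Derive a child that cancels itself with CancellationTimeoutError after 50ms.
	child := util.CancellationWithTimeout(parent, 50*time.Millisecond)
	select {
	case <-child.Finished():
		fmt.Println("cancelled:", child.Error()) // prints "cancelled: timeout"
	case <-time.After(time.Second):
		fmt.Println("unexpected: the timeout never fired")
	}
	// Cancelling the parent would also have cancelled the child (and any siblings).
	_ = parent.Cancel(nil)
}
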
|
|
@ -30,9 +30,8 @@ func UnlockThread() {
|
|||
func ResizeBytes(bs []byte, length int) []byte {
|
||||
if cap(bs) >= length {
|
||||
return bs[:length]
|
||||
} else {
|
||||
return make([]byte, length)
|
||||
}
|
||||
return make([]byte, length)
|
||||
}
|
||||
|
||||
// TimerStop stops a timer and makes sure the channel is drained, returns true if the timer was stopped before firing.
|
||||
|
@ -47,7 +46,7 @@ func TimerStop(t *time.Timer) bool {
|
|||
|
||||
// FuncTimeout runs the provided function in a separate goroutine, and returns true if the function finishes executing before the timeout passes, or false if the timeout passes.
|
||||
// It includes no mechanism to stop the function if the timeout fires, so the user is expected to do so on their own (such as with a Cancellation or a context).
|
||||
func FuncTimeout(f func(), timeout time.Duration) bool {
|
||||
func FuncTimeout(timeout time.Duration, f func()) bool {
|
||||
success := make(chan struct{})
|
||||
go func() {
|
||||
defer close(success)
|
||||
|
|
|
@ -1,557 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/gologme/log"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
// Peer represents a single peer object. This contains information from the
|
||||
// preferred switch port for this peer, although there may be more than one
|
||||
// active switch port connection to the peer in reality.
|
||||
//
|
||||
// This struct is informational only - you cannot manipulate peer connections
|
||||
// using instances of this struct. You should use the AddPeer or RemovePeer
|
||||
// functions instead.
|
||||
type Peer struct {
|
||||
PublicKey crypto.BoxPubKey // The public key of the remote node
|
||||
Endpoint string // The connection string used to connect to the peer
|
||||
BytesSent uint64 // Number of bytes sent to this peer
|
||||
BytesRecvd uint64 // Number of bytes received from this peer
|
||||
Protocol string // The transport protocol that this peer is connected with, typically "tcp"
|
||||
Port uint64 // Switch port number for this peer connection
|
||||
Uptime time.Duration // How long this peering has been active for
|
||||
}
|
||||
|
||||
// SwitchPeer represents a switch connection to a peer. Note that there may be
|
||||
// multiple switch peers per actual peer, e.g. if there are multiple connections
|
||||
// to a given node.
|
||||
//
|
||||
// This struct is informational only - you cannot manipulate switch peer
|
||||
// connections using instances of this struct. You should use the AddPeer or
|
||||
// RemovePeer functions instead.
|
||||
type SwitchPeer struct {
|
||||
PublicKey crypto.BoxPubKey // The public key of the remote node
|
||||
Coords []uint64 // The coordinates of the remote node
|
||||
BytesSent uint64 // Number of bytes sent via this switch port
|
||||
BytesRecvd uint64 // Number of bytes received via this switch port
|
||||
Port uint64 // Switch port number for this switch peer
|
||||
Protocol string // The transport protocol that this switch port is connected with, typically "tcp"
|
||||
Endpoint string // The connection string used to connect to the switch peer
|
||||
}
|
||||
|
||||
// DHTEntry represents a single DHT entry that has been learned or cached from
|
||||
// DHT searches.
|
||||
type DHTEntry struct {
|
||||
PublicKey crypto.BoxPubKey
|
||||
Coords []uint64
|
||||
LastSeen time.Duration
|
||||
}
|
||||
|
||||
// DHTRes represents a DHT response, as returned by DHTPing.
|
||||
type DHTRes struct {
|
||||
PublicKey crypto.BoxPubKey // key of the sender
|
||||
Coords []uint64 // coords of the sender
|
||||
Dest crypto.NodeID // the destination node ID
|
||||
Infos []DHTEntry // response
|
||||
}
|
||||
|
||||
// NodeInfoPayload represents a RequestNodeInfo response, in bytes.
|
||||
type NodeInfoPayload []byte
|
||||
|
||||
// SwitchQueues represents information from the switch related to link
|
||||
// congestion and a list of switch queues created in response to congestion on a
|
||||
// given link.
|
||||
type SwitchQueues struct {
|
||||
Queues []SwitchQueue // An array of SwitchQueue objects containing information about individual queues
|
||||
Count uint64 // The current number of active switch queues
|
||||
Size uint64 // The current total size of active switch queues
|
||||
HighestCount uint64 // The highest recorded number of switch queues so far
|
||||
HighestSize uint64 // The highest recorded total size of switch queues so far
|
||||
MaximumSize uint64 // The maximum allowed total size of switch queues, as specified by config
|
||||
}
|
||||
|
||||
// SwitchQueue represents a single switch queue. Switch queues are only created
|
||||
// in response to congestion on a given link and represent how much data has
|
||||
// been temporarily cached for sending once the congestion has cleared.
|
||||
type SwitchQueue struct {
|
||||
ID string // The ID of the switch queue
|
||||
Size uint64 // The total size, in bytes, of the queue
|
||||
Packets uint64 // The number of packets in the queue
|
||||
Port uint64 // The switch port to which the queue applies
|
||||
}
|
||||
|
||||
// Session represents an open session with another node. Sessions are opened in
|
||||
// response to traffic being exchanged between two nodes using Conn objects.
|
||||
// Note that sessions will automatically be closed by Yggdrasil if no traffic is
|
||||
// exchanged for around two minutes.
|
||||
type Session struct {
|
||||
PublicKey crypto.BoxPubKey // The public key of the remote node
|
||||
Coords []uint64 // The coordinates of the remote node
|
||||
BytesSent uint64 // Bytes sent to the session
|
||||
BytesRecvd uint64 // Bytes received from the session
|
||||
MTU MTU // The maximum supported message size of the session
|
||||
Uptime time.Duration // How long this session has been active for
|
||||
WasMTUFixed bool // This field is no longer used
|
||||
}
|
||||
|
||||
// GetPeers returns one or more Peer objects containing information about active
|
||||
// peerings with other Yggdrasil nodes, where one of the responses always
|
||||
// includes information about the current node (with a port number of 0). If
|
||||
// there is exactly one entry then this node is not connected to any other nodes
|
||||
// and is therefore isolated.
|
||||
func (c *Core) GetPeers() []Peer {
|
||||
ports := c.peers.ports.Load().(map[switchPort]*peer)
|
||||
var peers []Peer
|
||||
var ps []switchPort
|
||||
for port := range ports {
|
||||
ps = append(ps, port)
|
||||
}
|
||||
sort.Slice(ps, func(i, j int) bool { return ps[i] < ps[j] })
|
||||
for _, port := range ps {
|
||||
p := ports[port]
|
||||
var info Peer
|
||||
phony.Block(p, func() {
|
||||
info = Peer{
|
||||
Endpoint: p.intf.name,
|
||||
BytesSent: p.bytesSent,
|
||||
BytesRecvd: p.bytesRecvd,
|
||||
Protocol: p.intf.info.linkType,
|
||||
Port: uint64(port),
|
||||
Uptime: time.Since(p.firstSeen),
|
||||
}
|
||||
copy(info.PublicKey[:], p.box[:])
|
||||
})
|
||||
peers = append(peers, info)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// GetSwitchPeers returns zero or more SwitchPeer objects containing information
|
||||
// about switch port connections with other Yggdrasil nodes. Note that, unlike
|
||||
// GetPeers, GetSwitchPeers does not include information about the current node,
|
||||
// therefore it is possible for this to return zero elements if the node is
|
||||
// isolated or not connected to any peers.
|
||||
func (c *Core) GetSwitchPeers() []SwitchPeer {
|
||||
var switchpeers []SwitchPeer
|
||||
table := c.switchTable.table.Load().(lookupTable)
|
||||
peers := c.peers.ports.Load().(map[switchPort]*peer)
|
||||
for _, elem := range table.elems {
|
||||
peer, isIn := peers[elem.port]
|
||||
if !isIn {
|
||||
continue
|
||||
}
|
||||
coords := elem.locator.getCoords()
|
||||
var info SwitchPeer
|
||||
phony.Block(peer, func() {
|
||||
info = SwitchPeer{
|
||||
Coords: append([]uint64{}, wire_coordsBytestoUint64s(coords)...),
|
||||
BytesSent: peer.bytesSent,
|
||||
BytesRecvd: peer.bytesRecvd,
|
||||
Port: uint64(elem.port),
|
||||
Protocol: peer.intf.info.linkType,
|
||||
Endpoint: peer.intf.info.remote,
|
||||
}
|
||||
copy(info.PublicKey[:], peer.box[:])
|
||||
})
|
||||
switchpeers = append(switchpeers, info)
|
||||
}
|
||||
return switchpeers
|
||||
}
|
||||
|
||||
// GetDHT returns zero or more entries as stored in the DHT, cached primarily
|
||||
// from searches that have already taken place.
|
||||
func (c *Core) GetDHT() []DHTEntry {
|
||||
var dhtentries []DHTEntry
|
||||
getDHT := func() {
|
||||
now := time.Now()
|
||||
var dhtentry []*dhtInfo
|
||||
for _, v := range c.router.dht.table {
|
||||
dhtentry = append(dhtentry, v)
|
||||
}
|
||||
sort.SliceStable(dhtentry, func(i, j int) bool {
|
||||
return dht_ordered(&c.router.dht.nodeID, dhtentry[i].getNodeID(), dhtentry[j].getNodeID())
|
||||
})
|
||||
for _, v := range dhtentry {
|
||||
info := DHTEntry{
|
||||
Coords: append([]uint64{}, wire_coordsBytestoUint64s(v.coords)...),
|
||||
LastSeen: now.Sub(v.recv),
|
||||
}
|
||||
copy(info.PublicKey[:], v.key[:])
|
||||
dhtentries = append(dhtentries, info)
|
||||
}
|
||||
}
|
||||
phony.Block(&c.router, getDHT)
|
||||
return dhtentries
|
||||
}
|
||||
|
||||
// GetSwitchQueues returns information about the switch queues that are
|
||||
// currently in effect. These values can change within an instant.
|
||||
func (c *Core) GetSwitchQueues() SwitchQueues {
|
||||
var switchqueues SwitchQueues
|
||||
switchTable := &c.switchTable
|
||||
getSwitchQueues := func() {
|
||||
switchqueues = SwitchQueues{
|
||||
Count: uint64(len(switchTable.queues.bufs)),
|
||||
Size: switchTable.queues.size,
|
||||
HighestCount: uint64(switchTable.queues.maxbufs),
|
||||
HighestSize: switchTable.queues.maxsize,
|
||||
MaximumSize: switchTable.queues.totalMaxSize,
|
||||
}
|
||||
for k, v := range switchTable.queues.bufs {
|
||||
nexthop := switchTable.bestPortForCoords([]byte(k))
|
||||
queue := SwitchQueue{
|
||||
ID: k,
|
||||
Size: v.size,
|
||||
Packets: uint64(len(v.packets)),
|
||||
Port: uint64(nexthop),
|
||||
}
|
||||
switchqueues.Queues = append(switchqueues.Queues, queue)
|
||||
}
|
||||
}
|
||||
phony.Block(&c.switchTable, getSwitchQueues)
|
||||
return switchqueues
|
||||
}
|
||||
|
||||
// GetSessions returns a list of open sessions from this node to other nodes.
|
||||
func (c *Core) GetSessions() []Session {
|
||||
var sessions []Session
|
||||
getSessions := func() {
|
||||
for _, sinfo := range c.router.sessions.sinfos {
|
||||
var session Session
|
||||
workerFunc := func() {
|
||||
session = Session{
|
||||
Coords: append([]uint64{}, wire_coordsBytestoUint64s(sinfo.coords)...),
|
||||
MTU: sinfo._getMTU(),
|
||||
BytesSent: sinfo.bytesSent,
|
||||
BytesRecvd: sinfo.bytesRecvd,
|
||||
Uptime: time.Now().Sub(sinfo.timeOpened),
|
||||
WasMTUFixed: sinfo.wasMTUFixed,
|
||||
}
|
||||
copy(session.PublicKey[:], sinfo.theirPermPub[:])
|
||||
}
|
||||
phony.Block(sinfo, workerFunc)
|
||||
// TODO? skipped known but timed out sessions?
|
||||
sessions = append(sessions, session)
|
||||
}
|
||||
}
|
||||
phony.Block(&c.router, getSessions)
|
||||
return sessions
|
||||
}
|
||||
|
||||
// ConnListen returns a listener for Yggdrasil session connections. You can only
|
||||
// call this function once as each Yggdrasil node can only have a single
|
||||
// ConnListener. Make sure to keep the reference to this for as long as it is
|
||||
// needed.
|
||||
func (c *Core) ConnListen() (*Listener, error) {
|
||||
c.router.sessions.listenerMutex.Lock()
|
||||
defer c.router.sessions.listenerMutex.Unlock()
|
||||
if c.router.sessions.listener != nil {
|
||||
return nil, errors.New("a listener already exists")
|
||||
}
|
||||
c.router.sessions.listener = &Listener{
|
||||
core: c,
|
||||
conn: make(chan *Conn),
|
||||
close: make(chan interface{}),
|
||||
}
|
||||
return c.router.sessions.listener, nil
|
||||
}
|
||||
|
||||
// ConnDialer returns a dialer for Yggdrasil session connections. Since
|
||||
// ConnDialers are stateless, you can request as many dialers as you like,
|
||||
// although ideally you should request only one and keep the reference to it for
|
||||
// as long as it is needed.
|
||||
func (c *Core) ConnDialer() (*Dialer, error) {
|
||||
return &Dialer{
|
||||
core: c,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListenTCP starts a new TCP listener. The input URI should match that of the
|
||||
// "Listen" configuration item, e.g.
|
||||
// tcp://a.b.c.d:e
|
||||
func (c *Core) ListenTCP(uri string) (*TcpListener, error) {
|
||||
return c.link.tcp.listen(uri, nil)
|
||||
}
|
||||
|
||||
// ListenTLS starts a new TLS listener. The input URI should match that of the
|
||||
// "Listen" configuration item, e.g.
|
||||
// tls://a.b.c.d:e
|
||||
func (c *Core) ListenTLS(uri string) (*TcpListener, error) {
|
||||
return c.link.tcp.listen(uri, c.link.tcp.tls.forListener)
|
||||
}
|
||||
|
||||
// NodeID gets the node ID. This is derived from your router encryption keys.
|
||||
// Remote nodes wanting to open connections to your node will need to know your
|
||||
// node ID.
|
||||
func (c *Core) NodeID() *crypto.NodeID {
|
||||
return crypto.GetNodeID(&c.boxPub)
|
||||
}
|
||||
|
||||
// TreeID gets the tree ID. This is derived from your switch signing keys. There
|
||||
// is typically no need to share this key.
|
||||
func (c *Core) TreeID() *crypto.TreeID {
|
||||
return crypto.GetTreeID(&c.sigPub)
|
||||
}
|
||||
|
||||
// SigningPublicKey gets the node's signing public key, as used by the switch.
|
||||
func (c *Core) SigningPublicKey() string {
|
||||
return hex.EncodeToString(c.sigPub[:])
|
||||
}
|
||||
|
||||
// EncryptionPublicKey gets the node's encryption public key, as used by the
|
||||
// router.
|
||||
func (c *Core) EncryptionPublicKey() string {
|
||||
return hex.EncodeToString(c.boxPub[:])
|
||||
}
|
||||
|
||||
// Coords returns the current coordinates of the node. Note that these can
|
||||
// change at any time for a number of reasons, not limited to but including
|
||||
// changes to peerings (either yours or a parent node's) or changes to the network
|
||||
// root.
|
||||
//
|
||||
// This function may return an empty array - this is normal behaviour if either
|
||||
// you are the root of the network that you are connected to, or you are not
|
||||
// connected to any other nodes (effectively making you the root of a
|
||||
// single-node network).
|
||||
func (c *Core) Coords() []uint64 {
|
||||
table := c.switchTable.table.Load().(lookupTable)
|
||||
return wire_coordsBytestoUint64s(table.self.getCoords())
|
||||
}
|
||||
|
||||
// Address gets the IPv6 address of the Yggdrasil node. This is always a /128
|
||||
// address. The IPv6 address is only relevant when the node is operating as an
|
||||
// IP router and often is meaningless when embedded into an application, unless
|
||||
// that application also implements either VPN functionality or deals with IP
|
||||
// packets specifically.
|
||||
func (c *Core) Address() net.IP {
|
||||
address := net.IP(address.AddrForNodeID(c.NodeID())[:])
|
||||
return address
|
||||
}
|
||||
|
||||
// Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
|
||||
// /64 subnet. The IPv6 subnet is only relevant when the node is operating as an
|
||||
// IP router and often is meaningless when embedded into an application, unless
|
||||
// that application also implements either VPN functionality or deals with IP
|
||||
// packets specifically.
|
||||
func (c *Core) Subnet() net.IPNet {
|
||||
subnet := address.SubnetForNodeID(c.NodeID())[:]
|
||||
subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
|
||||
}
|
||||
|
||||
// MyNodeInfo gets the currently configured nodeinfo. NodeInfo is typically
|
||||
// specified through the "NodeInfo" option in the node configuration or using
|
||||
// the SetNodeInfo function, although it may also contain other built-in values
|
||||
// such as "buildname", "buildversion" etc.
|
||||
func (c *Core) MyNodeInfo() NodeInfoPayload {
|
||||
return c.router.nodeinfo.getNodeInfo()
|
||||
}
|
||||
|
||||
// SetNodeInfo sets the local nodeinfo. Note that nodeinfo can be any value or
|
||||
// struct, it will be serialised into JSON automatically.
|
||||
func (c *Core) SetNodeInfo(nodeinfo interface{}, nodeinfoprivacy bool) {
|
||||
c.router.nodeinfo.setNodeInfo(nodeinfo, nodeinfoprivacy)
|
||||
}
|
||||
|
||||
// GetMaximumSessionMTU returns the maximum allowed session MTU size.
|
||||
func (c *Core) GetMaximumSessionMTU() MTU {
|
||||
var mtu MTU
|
||||
phony.Block(&c.router, func() {
|
||||
mtu = c.router.sessions.myMaximumMTU
|
||||
})
|
||||
return mtu
|
||||
}
|
||||
|
||||
// SetMaximumSessionMTU sets the maximum allowed session MTU size. The default
|
||||
// value is 65535 bytes. Session pings will be sent to update all open sessions
|
||||
// if the MTU has changed.
|
||||
func (c *Core) SetMaximumSessionMTU(mtu MTU) {
|
||||
phony.Block(&c.router, func() {
|
||||
if c.router.sessions.myMaximumMTU != mtu {
|
||||
c.router.sessions.myMaximumMTU = mtu
|
||||
c.router.sessions.reconfigure()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// GetNodeInfo requests nodeinfo from a remote node, as specified by the public
|
||||
// key and coordinates specified. The third parameter specifies whether a cached
|
||||
// result is acceptable - this results in less traffic being generated than is
|
||||
// necessary when, e.g. crawling the network.
|
||||
func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool) (NodeInfoPayload, error) {
|
||||
response := make(chan *NodeInfoPayload, 1)
|
||||
c.router.nodeinfo.addCallback(key, func(nodeinfo *NodeInfoPayload) {
|
||||
defer func() { recover() }()
|
||||
select {
|
||||
case response <- nodeinfo:
|
||||
default:
|
||||
}
|
||||
})
|
||||
c.router.nodeinfo.sendNodeInfo(key, wire_coordsUint64stoBytes(coords), false)
|
||||
phony.Block(&c.router.nodeinfo, func() {}) // Wait for sendNodeInfo before starting timer
|
||||
timer := time.AfterFunc(6*time.Second, func() { close(response) })
|
||||
defer timer.Stop()
|
||||
for res := range response {
|
||||
return *res, nil
|
||||
}
|
||||
return NodeInfoPayload{}, fmt.Errorf("getNodeInfo timeout: %s", hex.EncodeToString(key[:]))
|
||||
}
|
||||
|
||||
// SetSessionGatekeeper allows you to configure a handler function for deciding
|
||||
// whether a session should be allowed or not. The default session firewall is
|
||||
// implemented in this way. The function receives the public key of the remote
|
||||
// side and a boolean which is true if we initiated the session or false if we
|
||||
// received an incoming session request. The function should return true to
|
||||
// allow the session or false to reject it.
|
||||
func (c *Core) SetSessionGatekeeper(f func(pubkey *crypto.BoxPubKey, initiator bool) bool) {
|
||||
c.router.sessions.isAllowedMutex.Lock()
|
||||
defer c.router.sessions.isAllowedMutex.Unlock()
|
||||
|
||||
c.router.sessions.isAllowedHandler = f
|
||||
}
|
||||
|
||||
// SetLogger sets the output logger of the Yggdrasil node after startup. This
|
||||
// may be useful if you want to redirect the output later. Note that this
|
||||
// expects a Logger from the github.com/gologme/log package and not from Go's
|
||||
// built-in log package.
|
||||
func (c *Core) SetLogger(log *log.Logger) {
|
||||
c.log = log
|
||||
}
|
||||
|
||||
// AddPeer adds a peer. This should be specified in the peer URI format, e.g.:
|
||||
// tcp://a.b.c.d:e
|
||||
// socks://a.b.c.d:e/f.g.h.i:j
|
||||
// This adds the peer to the peer list, so that they will be called again if the
|
||||
// connection drops.
|
||||
func (c *Core) AddPeer(addr string, sintf string) error {
|
||||
if err := c.CallPeer(addr, sintf); err != nil {
|
||||
// TODO: We maybe want this to write the peer to the persistent
|
||||
// configuration even if a connection attempt fails, but first we'll need to
|
||||
// move the code to check the peer URI so that we don't deliberately save a
|
||||
// peer with a known bad URI. Loading peers from config should really do the
|
||||
// same thing too but I don't think that happens today
|
||||
return err
|
||||
}
|
||||
c.config.Mutex.Lock()
|
||||
defer c.config.Mutex.Unlock()
|
||||
if sintf == "" {
|
||||
for _, peer := range c.config.Current.Peers {
|
||||
if peer == addr {
|
||||
return errors.New("peer already added")
|
||||
}
|
||||
}
|
||||
c.config.Current.Peers = append(c.config.Current.Peers, addr)
|
||||
} else {
|
||||
if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
|
||||
for _, peer := range c.config.Current.InterfacePeers[sintf] {
|
||||
if peer == addr {
|
||||
return errors.New("peer already added")
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := c.config.Current.InterfacePeers[sintf]; !ok {
|
||||
c.config.Current.InterfacePeers[sintf] = []string{addr}
|
||||
} else {
|
||||
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf], addr)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemovePeer is not implemented yet.
|
||||
func (c *Core) RemovePeer(addr string, sintf string) error {
|
||||
// TODO: Implement a reverse of AddPeer, where we look up the port number
|
||||
// based on the addr and sintf, disconnect it and then remove it from the
|
||||
// peers list so we don't reconnect to it later
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
// CallPeer calls a peer once. This should be specified in the peer URI format,
|
||||
// e.g.:
|
||||
// tcp://a.b.c.d:e
|
||||
// socks://a.b.c.d:e/f.g.h.i:j
|
||||
// This does not add the peer to the peer list, so if the connection drops, the
|
||||
// peer will not be called again automatically.
|
||||
func (c *Core) CallPeer(addr string, sintf string) error {
|
||||
return c.link.call(addr, sintf)
|
||||
}
|
||||
|
||||
// DisconnectPeer disconnects a peer once. This should be specified as a port
|
||||
// number.
|
||||
func (c *Core) DisconnectPeer(port uint64) error {
|
||||
c.peers.removePeer(switchPort(port))
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAllowedEncryptionPublicKeys returns the public keys permitted for incoming
|
||||
// peer connections. If this list is empty then all incoming peer connections
|
||||
// are accepted by default.
|
||||
func (c *Core) GetAllowedEncryptionPublicKeys() []string {
|
||||
return c.peers.getAllowedEncryptionPublicKeys()
|
||||
}
|
||||
|
||||
// AddAllowedEncryptionPublicKey whitelists a key for incoming peer connections.
|
||||
// By default all incoming peer connections are accepted, but adding public keys
|
||||
// to the whitelist using this function enables strict checking from that point
|
||||
// forward. Once the whitelist is enabled, only peer connections from
|
||||
// whitelisted public keys will be accepted.
|
||||
func (c *Core) AddAllowedEncryptionPublicKey(bstr string) (err error) {
|
||||
c.peers.addAllowedEncryptionPublicKey(bstr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveAllowedEncryptionPublicKey removes a key from the whitelist for
|
||||
// incoming peer connections. If none are set, an empty list permits all
|
||||
// incoming connections.
|
||||
func (c *Core) RemoveAllowedEncryptionPublicKey(bstr string) (err error) {
|
||||
c.peers.removeAllowedEncryptionPublicKey(bstr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DHTPing sends a DHT ping to the node with the provided key and coords,
|
||||
// optionally looking up the specified target NodeID.
|
||||
func (c *Core) DHTPing(key crypto.BoxPubKey, coords []uint64, target *crypto.NodeID) (DHTRes, error) {
|
||||
resCh := make(chan *dhtRes, 1)
|
||||
info := dhtInfo{
|
||||
key: key,
|
||||
coords: wire_coordsUint64stoBytes(coords),
|
||||
}
|
||||
if target == nil {
|
||||
target = info.getNodeID()
|
||||
}
|
||||
rq := dhtReqKey{info.key, *target}
|
||||
sendPing := func() {
|
||||
c.router.dht.addCallback(&rq, func(res *dhtRes) {
|
||||
resCh <- res
|
||||
})
|
||||
c.router.dht.ping(&info, &rq.dest)
|
||||
}
|
||||
phony.Block(&c.router, sendPing)
|
||||
// TODO: do something better than the below...
|
||||
res := <-resCh
|
||||
if res != nil {
|
||||
r := DHTRes{
|
||||
Coords: append([]uint64{}, wire_coordsBytestoUint64s(res.Coords)...),
|
||||
}
|
||||
copy(r.PublicKey[:], res.Key[:])
|
||||
for _, i := range res.Infos {
|
||||
e := DHTEntry{
|
||||
Coords: append([]uint64{}, wire_coordsBytestoUint64s(i.coords)...),
|
||||
}
|
||||
copy(e.PublicKey[:], i.key[:])
|
||||
r.Infos = append(r.Infos, e)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
return DHTRes{}, fmt.Errorf("DHT ping timeout: %s", hex.EncodeToString(key[:]))
|
||||
}
|
|
@ -1,398 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/types"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
type MTU = types.MTU
|
||||
|
||||
// ConnError implements the net.Error interface
|
||||
type ConnError struct {
|
||||
error
|
||||
timeout bool
|
||||
temporary bool
|
||||
closed bool
|
||||
maxsize int
|
||||
}
|
||||
|
||||
// Timeout returns true if the error relates to a timeout condition on the
|
||||
// connection.
|
||||
func (e *ConnError) Timeout() bool {
|
||||
return e.timeout
|
||||
}
|
||||
|
||||
// Temporary returns true if the error is temporary or false if it is a permanent
|
||||
// error condition.
|
||||
func (e *ConnError) Temporary() bool {
|
||||
return e.temporary
|
||||
}
|
||||
|
||||
// PacketTooBig returns true in response to sending a packet that is too large, and
|
||||
// if so, PacketMaximumSize reports the maximum supported packet size that should be used for the connection.
|
||||
// connection.
|
||||
func (e *ConnError) PacketTooBig() bool {
|
||||
return e.maxsize > 0
|
||||
}
|
||||
|
||||
// PacketMaximumSize returns the maximum supported packet size. This will only
|
||||
// return a non-zero value if ConnError.PacketTooBig() returns true.
|
||||
func (e *ConnError) PacketMaximumSize() int {
|
||||
if !e.PacketTooBig() {
|
||||
return 0
|
||||
}
|
||||
return e.maxsize
|
||||
}
|
||||
|
||||
// Closed returns true if the session is already closed and is now unusable.
|
||||
func (e *ConnError) Closed() bool {
|
||||
return e.closed
|
||||
}
|
||||
|
||||
// The Conn struct is a reference to an active connection session between the
|
||||
// local node and a remote node. Conn implements the io.ReadWriteCloser
|
||||
// interface and is used to send and receive traffic with a remote node.
|
||||
type Conn struct {
|
||||
phony.Inbox
|
||||
core *Core
|
||||
readDeadline *time.Time
|
||||
writeDeadline *time.Time
|
||||
nodeID *crypto.NodeID
|
||||
nodeMask *crypto.NodeID
|
||||
session *sessionInfo
|
||||
mtu MTU
|
||||
readCallback func([]byte)
|
||||
readBuffer chan []byte
|
||||
}
|
||||
|
||||
// TODO func NewConn() that initializes additional fields as needed
|
||||
func newConn(core *Core, nodeID *crypto.NodeID, nodeMask *crypto.NodeID, session *sessionInfo) *Conn {
|
||||
conn := Conn{
|
||||
core: core,
|
||||
nodeID: nodeID,
|
||||
nodeMask: nodeMask,
|
||||
session: session,
|
||||
readBuffer: make(chan []byte, 1024),
|
||||
}
|
||||
return &conn
|
||||
}
|
||||
|
||||
// String returns a string that uniquely identifies a connection. Currently this
|
||||
// takes a form similar to "conn=0x0000000", which contains a memory reference
|
||||
// to the Conn object. While this value should always be unique for each Conn
|
||||
// object, the format of this is not strictly defined and may change in the
|
||||
// future.
|
||||
func (c *Conn) String() string {
|
||||
var s string
|
||||
phony.Block(c, func() { s = fmt.Sprintf("conn=%p", c) })
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *Conn) setMTU(from phony.Actor, mtu MTU) {
|
||||
c.Act(from, func() { c.mtu = mtu })
|
||||
}
|
||||
|
||||
// This should never be called from an actor, used in the dial functions
|
||||
func (c *Conn) search() error {
|
||||
var err error
|
||||
done := make(chan struct{})
|
||||
phony.Block(&c.core.router, func() {
|
||||
_, isIn := c.core.router.searches.searches[*c.nodeID]
|
||||
if !isIn {
|
||||
searchCompleted := func(sinfo *sessionInfo, e error) {
|
||||
select {
|
||||
case <-done:
|
||||
// Somehow this was called multiple times, TODO don't let that happen
|
||||
if sinfo != nil {
|
||||
// Need to clean up to avoid a session leak
|
||||
sinfo.cancel.Cancel(nil)
|
||||
sinfo.sessions.removeSession(sinfo)
|
||||
}
|
||||
default:
|
||||
if sinfo != nil {
|
||||
// Finish initializing the session
|
||||
c.session = sinfo
|
||||
c.session.setConn(nil, c)
|
||||
c.nodeID = crypto.GetNodeID(&c.session.theirPermPub)
|
||||
for i := range c.nodeMask {
|
||||
c.nodeMask[i] = 0xFF
|
||||
}
|
||||
}
|
||||
err = e
|
||||
close(done)
|
||||
}
|
||||
}
|
||||
sinfo := c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
|
||||
sinfo.startSearch()
|
||||
} else {
|
||||
err = errors.New("search already exists")
|
||||
close(done)
|
||||
}
|
||||
})
|
||||
<-done
|
||||
if c.session == nil && err == nil {
|
||||
panic("search failed but returned no error")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Used in session keep-alive traffic
|
||||
func (c *Conn) doSearch() {
|
||||
routerWork := func() {
|
||||
// Check to see if there is a search already matching the destination
|
||||
sinfo, isIn := c.core.router.searches.searches[*c.nodeID]
|
||||
if !isIn {
|
||||
// Nothing was found, so create a new search
|
||||
searchCompleted := func(sinfo *sessionInfo, e error) {}
|
||||
sinfo = c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
|
||||
c.core.log.Debugf("%s DHT search started: %p", c.String(), sinfo)
|
||||
// Start the search
|
||||
sinfo.startSearch()
|
||||
}
|
||||
}
|
||||
c.core.router.Act(c.session, routerWork)
|
||||
}
|
||||
|
||||
func (c *Conn) _getDeadlineCancellation(t *time.Time) (util.Cancellation, bool) {
|
||||
if t != nil {
|
||||
// A deadline is set, so return a Cancellation that uses it
|
||||
c := util.CancellationWithDeadline(c.session.cancel, *t)
|
||||
return c, true
|
||||
} else {
|
||||
// No deadline was set, so just return the existing cancellation and a dummy value
|
||||
return c.session.cancel, false
|
||||
}
|
||||
}
|
||||
|
||||
// SetReadCallback allows you to specify a function that will be called whenever
|
||||
// a packet is received. This should be used if you wish to implement
|
||||
// asynchronous patterns for receiving data from the remote node.
|
||||
//
|
||||
// Note that if a read callback has been supplied, you should no longer attempt
|
||||
// to use the synchronous Read function.
|
||||
func (c *Conn) SetReadCallback(callback func([]byte)) {
|
||||
c.Act(nil, func() {
|
||||
c.readCallback = callback
|
||||
c._drainReadBuffer()
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Conn) _drainReadBuffer() {
|
||||
if c.readCallback == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case bs := <-c.readBuffer:
|
||||
c.readCallback(bs)
|
||||
c.Act(nil, c._drainReadBuffer) // In case there's more
|
||||
default:
|
||||
}
|
||||
}
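// Illustrative sketch (not part of the original file): how a caller might use
// SetReadCallback for the asynchronous receive pattern described above. It
// assumes a *Conn obtained elsewhere (e.g. from a Dialer or Listener) and a
// hypothetical handlePacket helper supplied by the caller.
func exampleAsyncRead(c *Conn, handlePacket func([]byte)) {
	c.SetReadCallback(func(packet []byte) {
		// This runs on the Conn's actor, so keep the work short or hand the
		// packet off to another goroutine.
		handlePacket(packet)
	})
	// From this point on, the synchronous Read method should not be used.
}
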
// Called by the session to pass a new message to the Conn
func (c *Conn) recvMsg(from phony.Actor, msg []byte) {
	c.Act(from, func() {
		if c.readCallback != nil {
			c.readCallback(msg)
		} else {
			select {
			case c.readBuffer <- msg:
			default:
			}
		}
	})
}

// Used internally by Read, the caller is responsible for util.PutBytes when they're done.
func (c *Conn) readNoCopy() ([]byte, error) {
	var cancel util.Cancellation
	var doCancel bool
	phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.readDeadline) })
	if doCancel {
		defer cancel.Cancel(nil)
	}
	// Wait for some traffic to come through from the session
	select {
	case <-cancel.Finished():
		if cancel.Error() == util.CancellationTimeoutError {
			return nil, ConnError{errors.New("read timeout"), true, false, false, 0}
		} else {
			return nil, ConnError{errors.New("session closed"), false, false, true, 0}
		}
	case bs := <-c.readBuffer:
		return bs, nil
	}
}

// Read allows you to read from the connection in a synchronous fashion. The
// function will block up until the point that either new data is available, the
// connection has been closed or the read deadline has been reached. If the
// function succeeds, the number of bytes read from the connection will be
// returned. Otherwise, an error condition will be returned.
//
// Note that you can also implement asynchronous reads by using SetReadCallback.
// If you do that, you should no longer attempt to use the Read function.
func (c *Conn) Read(b []byte) (int, error) {
	bs, err := c.readNoCopy()
	if err != nil {
		return 0, err
	}
	n := len(bs)
	if len(bs) > len(b) {
		n = len(b)
		err = ConnError{errors.New("read buffer too small for entire packet"), false, true, false, 0}
	}
	// Copy results to the output slice and clean up
	copy(b, bs)
	util.PutBytes(bs)
	// Return the number of bytes copied to the slice, along with any error
	return n, err
}

func (c *Conn) _write(msg FlowKeyMessage) error {
	if len(msg.Message) > int(c.mtu) {
		return ConnError{errors.New("packet too big"), true, false, false, int(c.mtu)}
	}
	c.session.Act(c, func() {
		// Send the packet
		c.session._send(msg)
		// Session keep-alive, while we wait for the crypto workers from send
		switch {
		case time.Since(c.session.time) > 6*time.Second:
			if c.session.time.Before(c.session.pingTime) && time.Since(c.session.pingTime) > 6*time.Second {
				// TODO double check that the above condition is correct
				c.doSearch()
			} else {
				c.session.ping(c.session) // TODO send from self if this becomes an actor
			}
		case c.session.reset && c.session.pingTime.Before(c.session.time):
			c.session.ping(c.session) // TODO send from self if this becomes an actor
		default: // Don't do anything, to keep traffic throttled
		}
	})
	return nil
}

// WriteFrom should be called by a phony.Actor, and tells the Conn to send a
// message. This is used internally by Write. If the callback is called with a
// non-nil value, then it is safe to reuse the argument FlowKeyMessage.
func (c *Conn) WriteFrom(from phony.Actor, msg FlowKeyMessage, callback func(error)) {
	c.Act(from, func() {
		callback(c._write(msg))
	})
}

// writeNoCopy is used internally by Write and makes use of WriteFrom under the hood.
// The caller must not reuse the argument FlowKeyMessage when a nil error is returned.
func (c *Conn) writeNoCopy(msg FlowKeyMessage) error {
	var cancel util.Cancellation
	var doCancel bool
	phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.writeDeadline) })
	if doCancel {
		defer cancel.Cancel(nil)
	}
	var err error
	select {
	case <-cancel.Finished():
		if cancel.Error() == util.CancellationTimeoutError {
			err = ConnError{errors.New("write timeout"), true, false, false, 0}
		} else {
			err = ConnError{errors.New("session closed"), false, false, true, 0}
		}
	default:
		done := make(chan struct{})
		callback := func(e error) { err = e; close(done) }
		c.WriteFrom(nil, msg, callback)
		<-done
	}
	return err
}

// Write allows you to write to the connection in a synchronous fashion. This
// function may block until either the write has completed, the connection has
// been closed or the write deadline has been reached. If the function succeeds,
// the number of written bytes is returned. Otherwise, an error condition is
// returned.
func (c *Conn) Write(b []byte) (int, error) {
	written := len(b)
	msg := FlowKeyMessage{Message: append(util.GetBytes(), b...)}
	err := c.writeNoCopy(msg)
	if err != nil {
		util.PutBytes(msg.Message)
		written = 0
	}
	return written, err
}

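// Illustrative sketch (not part of the original file): synchronous Read/Write
// with a deadline, using the methods defined above. The buffer size and the
// 30-second timeout are arbitrary example values.
func exampleSyncEcho(c *Conn) error {
	if err := c.SetDeadline(time.Now().Add(30 * time.Second)); err != nil {
		return err
	}
	buf := make([]byte, 65535)
	n, err := c.Read(buf)
	if err != nil {
		return err
	}
	// Echo the packet back to the remote node.
	_, err = c.Write(buf[:n])
	return err
}
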
// Close will close an open connection; any blocking operations on the
// connection will unblock and return. From this point forward, the connection
// can no longer be used and you should no longer attempt to Read or Write to
// the connection.
func (c *Conn) Close() (err error) {
	phony.Block(c, func() {
		if c.session != nil {
			// Close the session, if it hasn't been closed already
			if e := c.session.cancel.Cancel(errors.New("connection closed")); e != nil {
				err = ConnError{errors.New("close failed, session already closed"), false, false, true, 0}
			} else {
				c.session.doRemove()
			}
		}
	})
	return
}

// LocalAddr returns the complete public key of the local side of the
// connection. This is always going to return your own node's public key.
func (c *Conn) LocalAddr() net.Addr {
	return &c.core.boxPub
}

// RemoteAddr returns the complete public key of the remote side of the
// connection.
func (c *Conn) RemoteAddr() net.Addr {
	if c.session != nil {
		return &c.session.theirPermPub
	}
	return nil
}

// SetDeadline is equivalent to calling both SetReadDeadline and
// SetWriteDeadline with the same value, configuring the maximum amount of time
// that synchronous Read and Write operations can block for. If no deadline is
// configured, Read and Write operations can potentially block indefinitely.
func (c *Conn) SetDeadline(t time.Time) error {
	c.SetReadDeadline(t)
	c.SetWriteDeadline(t)
	return nil
}

// SetReadDeadline configures the maximum amount of time that a synchronous Read
// operation can block for. A Read operation will unblock at the point that the
// read deadline is reached if no other condition (such as data arrival or
// connection closure) happens first. If no deadline is configured, Read
// operations can potentially block indefinitely.
func (c *Conn) SetReadDeadline(t time.Time) error {
	// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
	phony.Block(c, func() { c.readDeadline = &t })
	return nil
}

// SetWriteDeadline configures the maximum amount of time that a synchronous
// Write operation can block for. A Write operation will unblock at the point
// that the write deadline is reached if no other condition (such as data sending
// or connection closure) happens first. If no deadline is configured, Write
// operations can potentially block indefinitely.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
	phony.Block(c, func() { c.writeDeadline = &t })
	return nil
}

@@ -1,448 +0,0 @@
package yggdrasil

// A chord-like Distributed Hash Table (DHT).
// Used to look up coords given a NodeID and bitmask (taken from an IPv6 address).
// Keeps track of immediate successor, predecessor, and all peers.
// Also keeps track of other nodes if they're closer in tree space than all other known nodes encountered when heading in either direction to that point, under the hypothesis that, for the kinds of networks we care about, this should probabilistically include the node needed to keep lookups to near O(logn) steps.

import (
	"sort"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

const (
	dht_lookup_size     = 16
	dht_timeout         = 6 * time.Minute
	dht_max_delay       = 5 * time.Minute
	dht_max_delay_dirty = 30 * time.Second
)

// dhtInfo represents everything we know about a node in the DHT.
// This includes its key, a cache of its NodeID, coords, and timing/ping related info for deciding who/when to ping nodes for maintenance.
type dhtInfo struct {
	nodeID_hidden *crypto.NodeID
	key           crypto.BoxPubKey
	coords        []byte
	recv          time.Time // When we last received a message
	pings         int       // Time out if at least 3 consecutive maintenance pings drop
	throttle      time.Duration
	dirty         bool // Set to true if we've used this node in ping responses (for queries about someone other than the person doing the asking, i.e. real searches) since the last time we heard from the node
}

// Returns the *NodeID associated with dhtInfo.key, calculating it on the fly the first time or from a cache all subsequent times.
func (info *dhtInfo) getNodeID() *crypto.NodeID {
	if info.nodeID_hidden == nil {
		info.nodeID_hidden = crypto.GetNodeID(&info.key)
	}
	return info.nodeID_hidden
}

// Request for a node to do a lookup.
// Includes our key and coords so they can send a response back, and the destination NodeID we want to ask about.
type dhtReq struct {
	Key    crypto.BoxPubKey // Key of whoever asked
	Coords []byte           // Coords of whoever asked
	Dest   crypto.NodeID    // NodeID they're asking about
}

// Response to a DHT lookup.
// Includes the key and coords of the node that's responding, and the destination they were asked about.
// The main part is Infos []*dhtInfo, the lookup response.
type dhtRes struct {
	Key    crypto.BoxPubKey // key of the sender
	Coords []byte           // coords of the sender
	Dest   crypto.NodeID
	Infos  []*dhtInfo // response
}

// Parts of a DHT req usable as a key in a map.
type dhtReqKey struct {
	key  crypto.BoxPubKey
	dest crypto.NodeID
}

// The main DHT struct.
type dht struct {
	router    *router
	nodeID    crypto.NodeID
	reqs      map[dhtReqKey]time.Time          // Keeps track of recent outstanding requests
	callbacks map[dhtReqKey][]dht_callbackInfo // Search and admin lookup callbacks
	// These next two could be replaced by a single linked list or similar...
	table map[crypto.NodeID]*dhtInfo
	imp   []*dhtInfo
}

// Initializes the DHT.
func (t *dht) init(r *router) {
	t.router = r
	t.nodeID = *t.router.core.NodeID()
	t.callbacks = make(map[dhtReqKey][]dht_callbackInfo)
	t.reset()
}

func (t *dht) reconfigure() {
	// This is where reconfiguration would go, if we had anything to do
}

// Resets the DHT in response to coord changes.
// This empties all info from the DHT and drops outstanding requests.
func (t *dht) reset() {
	t.reqs = make(map[dhtReqKey]time.Time)
	t.table = make(map[crypto.NodeID]*dhtInfo)
	t.imp = nil
}

// Does a DHT lookup and returns up to dht_lookup_size results.
func (t *dht) lookup(nodeID *crypto.NodeID, everything bool) []*dhtInfo {
	results := make([]*dhtInfo, 0, len(t.table))
	for _, info := range t.table {
		results = append(results, info)
	}
	if len(results) > dht_lookup_size {
		// Drop the middle part, so we keep some nodes before and after.
		// This should help to bootstrap / recover more quickly.
		sort.SliceStable(results, func(i, j int) bool {
			return dht_ordered(nodeID, results[i].getNodeID(), results[j].getNodeID())
		})
		newRes := make([]*dhtInfo, 0, len(results))
		newRes = append(newRes, results[len(results)-dht_lookup_size/2:]...)
		newRes = append(newRes, results[:len(results)-dht_lookup_size/2]...)
		results = newRes
		results = results[:dht_lookup_size]
	}
	return results
}

// Insert into table, preserving the time we last sent a packet if the node was already in the table, otherwise setting that time to now.
func (t *dht) insert(info *dhtInfo) {
	if *info.getNodeID() == t.nodeID {
		// This shouldn't happen, but don't add it if it does
		return
	}
	info.recv = time.Now()
	if oldInfo, isIn := t.table[*info.getNodeID()]; isIn {
		sameCoords := true
		if len(info.coords) != len(oldInfo.coords) {
			sameCoords = false
		} else {
			for idx := 0; idx < len(info.coords); idx++ {
				if info.coords[idx] != oldInfo.coords[idx] {
					sameCoords = false
					break
				}
			}
		}
		if sameCoords {
			info.throttle = oldInfo.throttle
		}
	}
	t.imp = nil // It needs to update to get a pointer to the new info
	t.table[*info.getNodeID()] = info
}

// Insert a peer into the table if it hasn't been pinged lately, to keep peers from dropping
func (t *dht) insertPeer(info *dhtInfo) {
	oldInfo, isIn := t.table[*info.getNodeID()]
	if !isIn || time.Since(oldInfo.recv) > dht_max_delay+30*time.Second {
		// TODO? also check coords?
		newInfo := *info // Insert a copy
		t.insert(&newInfo)
	}
}

// Return true if first/second/third are (partially) ordered correctly.
func dht_ordered(first, second, third *crypto.NodeID) bool {
	lessOrEqual := func(first, second *crypto.NodeID) bool {
		for idx := 0; idx < crypto.NodeIDLen; idx++ {
			if first[idx] > second[idx] {
				return false
			}
			if first[idx] < second[idx] {
				return true
			}
		}
		return true
	}
	firstLessThanSecond := lessOrEqual(first, second)
	secondLessThanThird := lessOrEqual(second, third)
	thirdLessThanFirst := lessOrEqual(third, first)
	switch {
	case firstLessThanSecond && secondLessThanThird:
		// Nothing wrapped around 0, the easy case
		return true
	case thirdLessThanFirst && firstLessThanSecond:
		// Third wrapped around 0
		return true
	case secondLessThanThird && thirdLessThanFirst:
		// Second (and third) wrapped around 0
		return true
	}
	return false
}

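// Illustrative sketch (not part of the original file): what dht_ordered means
// in practice. Treating NodeIDs as points on a ring, it returns true when
// second lies on the arc travelled from first towards third, allowing for
// wrap-around past zero. The concrete byte values are arbitrary examples.
func exampleDHTOrdered() bool {
	var a, b, c crypto.NodeID
	a[0], b[0], c[0] = 0x10, 0x20, 0x30
	ordered := dht_ordered(&a, &b, &c) // true: 0x10 <= 0x20 <= 0x30, no wrap
	wrapped := dht_ordered(&c, &a, &b) // true: starting at 0x30, the arc wraps past zero to reach 0x10 and then 0x20
	return ordered && wrapped
}
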
// Reads a request, performs a lookup, and responds.
// Update info about the node that sent the request.
func (t *dht) handleReq(req *dhtReq) {
	// Send them what they asked for
	loc := t.router.core.switchTable.getLocator()
	coords := loc.getCoords()
	res := dhtRes{
		Key:    t.router.core.boxPub,
		Coords: coords,
		Dest:   req.Dest,
		Infos:  t.lookup(&req.Dest, false),
	}
	t.sendRes(&res, req)
	// Also add them to our DHT
	info := dhtInfo{
		key:    req.Key,
		coords: req.Coords,
	}
	if _, isIn := t.table[*info.getNodeID()]; !isIn && t.isImportant(&info) {
		t.ping(&info, nil)
	}
	// Maybe mark nodes from lookup as dirty
	if req.Dest != *info.getNodeID() {
		// This node asked about someone other than themself, so this wasn't just idle traffic.
		for _, info := range res.Infos {
			// Mark nodes dirty so we're sure to check up on them again later
			info.dirty = true
		}
	}
}

// Sends a lookup response to the specified node.
func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
	// Send a reply for a dhtReq
	bs := res.encode()
	shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &req.Key)
	payload, nonce := crypto.BoxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		Coords:  req.Coords,
		ToKey:   req.Key,
		FromKey: t.router.core.boxPub,
		Nonce:   *nonce,
		Payload: payload,
	}
	packet := p.encode()
	t.router.out(packet)
}

type dht_callbackInfo struct {
	f    func(*dhtRes)
	time time.Time
}

// Adds a callback and removes it after some timeout.
func (t *dht) addCallback(rq *dhtReqKey, callback func(*dhtRes)) {
	info := dht_callbackInfo{callback, time.Now().Add(6 * time.Second)}
	t.callbacks[*rq] = append(t.callbacks[*rq], info)
}

// Reads a lookup response, checks that we had sent a matching request, and processes the response info.
// This mainly consists of updating the node we asked in our DHT (they responded, so we know they're still alive), and deciding if we want to do anything with their responses
func (t *dht) handleRes(res *dhtRes) {
	rq := dhtReqKey{res.Key, res.Dest}
	if callbacks, isIn := t.callbacks[rq]; isIn {
		for _, callback := range callbacks {
			callback.f(res)
		}
		delete(t.callbacks, rq)
	}
	_, isIn := t.reqs[rq]
	if !isIn {
		return
	}
	delete(t.reqs, rq)
	rinfo := dhtInfo{
		key:    res.Key,
		coords: res.Coords,
	}
	t.insert(&rinfo)
	for _, info := range res.Infos {
		if *info.getNodeID() == t.nodeID {
			continue
		} // Skip self
		if _, isIn := t.table[*info.getNodeID()]; isIn {
			// TODO? don't skip if coords are different?
			continue
		}
		if t.isImportant(info) {
			t.ping(info, nil)
		}
	}
}

// Sends a lookup request to the specified node.
func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
	// Send a dhtReq to the node in dhtInfo
	bs := req.encode()
	shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &dest.key)
	payload, nonce := crypto.BoxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		Coords:  dest.coords,
		ToKey:   dest.key,
		FromKey: t.router.core.boxPub,
		Nonce:   *nonce,
		Payload: payload,
	}
	packet := p.encode()
	t.router.out(packet)
	rq := dhtReqKey{dest.key, req.Dest}
	t.reqs[rq] = time.Now()
}

// Sends a lookup to this info, looking for the target.
func (t *dht) ping(info *dhtInfo, target *crypto.NodeID) {
	// Creates a req for the node at dhtInfo, asking them about the target (if one is given) or themself (if no target is given)
	if target == nil {
		target = &t.nodeID
	}
	loc := t.router.core.switchTable.getLocator()
	coords := loc.getCoords()
	req := dhtReq{
		Key:    t.router.core.boxPub,
		Coords: coords,
		Dest:   *target,
	}
	t.sendReq(&req, info)
}

// Periodic maintenance work to keep important DHT nodes alive.
func (t *dht) doMaintenance() {
	now := time.Now()
	newReqs := make(map[dhtReqKey]time.Time, len(t.reqs))
	for key, start := range t.reqs {
		if now.Sub(start) < 6*time.Second {
			newReqs[key] = start
		}
	}
	t.reqs = newReqs
	newCallbacks := make(map[dhtReqKey][]dht_callbackInfo, len(t.callbacks))
	for key, cs := range t.callbacks {
		for _, c := range cs {
			if now.Before(c.time) {
				newCallbacks[key] = append(newCallbacks[key], c)
			} else {
				// Signal failure
				c.f(nil)
			}
		}
	}
	t.callbacks = newCallbacks
	for infoID, info := range t.table {
		switch {
		case info.pings > 6:
			// It failed to respond to too many pings
			fallthrough
		case now.Sub(info.recv) > dht_timeout:
			// It's too old
			fallthrough
		case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty && !t.isImportant(info):
			// We won't ping it to refresh it, so just drop it
			delete(t.table, infoID)
			t.imp = nil
		}
	}
	for _, info := range t.getImportant() {
		switch {
		case now.Sub(info.recv) > info.throttle:
			info.throttle *= 2
			if info.throttle < time.Second {
				info.throttle = time.Second
			} else if info.throttle > dht_max_delay {
				info.throttle = dht_max_delay
			}
			fallthrough
		case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty:
			t.ping(info, nil)
			info.pings++
		}
	}
}

// Gets a list of important nodes, used by isImportant.
func (t *dht) getImportant() []*dhtInfo {
	if t.imp == nil {
		// Get a list of all known nodes
		infos := make([]*dhtInfo, 0, len(t.table))
		for _, info := range t.table {
			infos = append(infos, info)
		}
		// Sort them by increasing order in distance along the ring
		sort.SliceStable(infos, func(i, j int) bool {
			// Sort in order of predecessors (!), reverse from chord normal, because it plays nicer with zero bits for unknown parts of target addresses
			return dht_ordered(infos[j].getNodeID(), infos[i].getNodeID(), &t.nodeID)
		})
		// Keep the ones that are no further than the closest seen so far
		minDist := ^uint64(0)
		loc := t.router.core.switchTable.getLocator()
		important := infos[:0]
		for _, info := range infos {
			dist := uint64(loc.dist(info.coords))
			if dist < minDist {
				minDist = dist
				important = append(important, info)
			} else if len(important) < 2 {
				important = append(important, info)
			}
		}
		var temp []*dhtInfo
		minDist = ^uint64(0)
		for idx := len(infos) - 1; idx >= 0; idx-- {
			info := infos[idx]
			dist := uint64(loc.dist(info.coords))
			if dist < minDist {
				minDist = dist
				temp = append(temp, info)
			} else if len(temp) < 2 {
				temp = append(temp, info)
			}
		}
		for idx := len(temp) - 1; idx >= 0; idx-- {
			important = append(important, temp[idx])
		}
		t.imp = important
	}
	return t.imp
}

// Returns true if this is a node we need to keep track of for the DHT to work.
func (t *dht) isImportant(ninfo *dhtInfo) bool {
	if ninfo.key == t.router.core.boxPub {
		return false
	}
	important := t.getImportant()
	// Check if ninfo is of equal or greater importance to what we already know
	loc := t.router.core.switchTable.getLocator()
	ndist := uint64(loc.dist(ninfo.coords))
	minDist := ^uint64(0)
	for _, info := range important {
		if (*info.getNodeID() == *ninfo.getNodeID()) ||
			(ndist < minDist && dht_ordered(info.getNodeID(), ninfo.getNodeID(), &t.nodeID)) {
			// Either the same node, or a better one
			return true
		}
		dist := uint64(loc.dist(info.coords))
		if dist < minDist {
			minDist = dist
		}
	}
	minDist = ^uint64(0)
	for idx := len(important) - 1; idx >= 0; idx-- {
		info := important[idx]
		if (*info.getNodeID() == *ninfo.getNodeID()) ||
			(ndist < minDist && dht_ordered(&t.nodeID, ninfo.getNodeID(), info.getNodeID())) {
			// Either the same node, or a better one
			return true
		}
		dist := uint64(loc.dist(info.coords))
		if dist < minDist {
			minDist = dist
		}
	}
	// We didn't find any important node that ninfo is better than
	return false
}

@@ -1,120 +0,0 @@
package yggdrasil

import (
	"context"
	"encoding/hex"
	"errors"
	"net"
	"strconv"
	"strings"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

// Dialer represents a Yggdrasil connection dialer.
type Dialer struct {
	core *Core
}

// Dial opens a session to the given node. The first parameter should be
// "curve25519" or "nodeid" and the second parameter should contain a
// hexadecimal representation of the target. It uses DialContext internally.
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
	return d.DialContext(nil, network, address)
}

// DialContext is used internally by Dial, and should only be used with a
// context that includes a timeout. It uses DialByNodeIDandMask internally when
// the network is "nodeid", or DialByPublicKey when the network is "curve25519".
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
	var nodeID crypto.NodeID
	var nodeMask crypto.NodeID
	// Process
	switch network {
	case "curve25519":
		dest, err := hex.DecodeString(address)
		if err != nil {
			return nil, err
		}
		if len(dest) != crypto.BoxPubKeyLen {
			return nil, errors.New("invalid key length supplied")
		}
		var pubKey crypto.BoxPubKey
		copy(pubKey[:], dest)
		return d.DialByPublicKey(ctx, &pubKey)
	case "nodeid":
		// A node ID was provided - we don't need to do anything special with it
		if tokens := strings.Split(address, "/"); len(tokens) == 2 {
			l, err := strconv.Atoi(tokens[1])
			if err != nil {
				return nil, err
			}
			dest, err := hex.DecodeString(tokens[0])
			if err != nil {
				return nil, err
			}
			copy(nodeID[:], dest)
			for idx := 0; idx < l; idx++ {
				nodeMask[idx/8] |= 0x80 >> byte(idx%8)
			}
		} else {
			dest, err := hex.DecodeString(tokens[0])
			if err != nil {
				return nil, err
			}
			copy(nodeID[:], dest)
			for i := range nodeMask {
				nodeMask[i] = 0xFF
			}
		}
		return d.DialByNodeIDandMask(ctx, &nodeID, &nodeMask)
	default:
		// An unexpected address type was given, so give up
		return nil, errors.New("unexpected address type")
	}
}

// DialByNodeIDandMask opens a session to the given node based on raw NodeID
// parameters. If ctx is nil or has no timeout, then a default timeout of 6
// seconds will apply, beginning *after* the search finishes.
func (d *Dialer) DialByNodeIDandMask(ctx context.Context, nodeID, nodeMask *crypto.NodeID) (net.Conn, error) {
	startDial := time.Now()
	conn := newConn(d.core, nodeID, nodeMask, nil)
	if err := conn.search(); err != nil {
		// TODO: make searches take a context, so they can be cancelled early
		conn.Close()
		return nil, err
	}
	endSearch := time.Now()
	d.core.log.Debugln("Dial searched for:", nodeID, "in time:", endSearch.Sub(startDial))
	conn.session.setConn(nil, conn)
	var cancel context.CancelFunc
	if ctx == nil {
		ctx = context.Background()
	}
	ctx, cancel = context.WithTimeout(ctx, 6*time.Second)
	defer cancel()
	select {
	case <-conn.session.init:
		endInit := time.Now()
		d.core.log.Debugln("Dial initialized session for:", nodeID, "in time:", endInit.Sub(endSearch))
		d.core.log.Debugln("Finished dial for:", nodeID, "in time:", endInit.Sub(startDial))
		return conn, nil
	case <-ctx.Done():
		conn.Close()
		return nil, errors.New("session handshake timeout")
	}
}

// DialByPublicKey opens a session to the given node based on the public key. If
// ctx is nil or has no timeout, then a default timeout of 6 seconds will apply,
// beginning *after* the search finishes.
func (d *Dialer) DialByPublicKey(ctx context.Context, pubKey *crypto.BoxPubKey) (net.Conn, error) {
	nodeID := crypto.GetNodeID(pubKey)
	var nodeMask crypto.NodeID
	for i := range nodeMask {
		nodeMask[i] = 0xFF
	}
	return d.DialByNodeIDandMask(ctx, nodeID, &nodeMask)
}

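// Illustrative sketch (not part of the original file): dialing by node ID,
// matching the address formats documented for Dial above. It assumes a
// *Dialer and *crypto.NodeID supplied by the caller; the "nodeid" network
// also accepts an optional "/<prefix length in bits>" suffix when only part
// of the node ID is known.
func exampleDialByNodeID(d *Dialer, nodeID *crypto.NodeID) (net.Conn, error) {
	// Equivalent to DialByNodeIDandMask with a fully-set mask.
	return d.Dial("nodeid", hex.EncodeToString(nodeID[:]))
}
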
@@ -1,500 +0,0 @@
package yggdrasil

import (
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net"
	"net/url"
	"strings"
	"sync"

	//"sync/atomic"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/address"
	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
	"github.com/yggdrasil-network/yggdrasil-go/src/util"
	"golang.org/x/net/proxy"

	"github.com/Arceliar/phony"
)

type link struct {
	core       *Core
	mutex      sync.RWMutex // protects interfaces below
	interfaces map[linkInfo]*linkInterface
	tcp        tcp // TCP interface support
	stopped    chan struct{}
	// TODO timeout (to remove from switch), read from config.ReadTimeout
}

type linkInfo struct {
	box      crypto.BoxPubKey // Their encryption key
	sig      crypto.SigPubKey // Their signing key
	linkType string           // Type of link, e.g. TCP, AWDL
	local    string           // Local name or address
	remote   string           // Remote name or address
}

type linkInterfaceMsgIO interface {
	readMsg() ([]byte, error)
	writeMsgs([][]byte) (int, error)
	close() error
	// These are temporary workarounds to stream semantics
	_sendMetaBytes([]byte) error
	_recvMetaBytes() ([]byte, error)
}

type linkInterface struct {
	name     string
	link     *link
	peer     *peer
	options  linkOptions
	msgIO    linkInterfaceMsgIO
	info     linkInfo
	incoming bool
	force    bool
	closed   chan struct{}
	reader   linkReader // Reads packets, notifies this linkInterface, passes packets to switch
	writer   linkWriter // Writes packets, notifies this linkInterface
	phony.Inbox                // Protects the below
	sendTimer      *time.Timer // Fires to signal that sending is blocked
	keepAliveTimer *time.Timer // Fires to send keep-alive traffic
	stallTimer     *time.Timer // Fires to signal that no incoming traffic (including keep-alive) has been seen
	closeTimer     *time.Timer // Fires when the link has been idle so long we need to close it
	inSwitch       bool        // True if the switch is tracking this link
	stalled        bool        // True if we haven't been receiving any response traffic
	unstalled      bool        // False if an idle notification to the switch hasn't been sent because we stalled (or are first starting up)
}

type linkOptions struct {
	pinnedCurve25519Keys map[crypto.BoxPubKey]struct{}
	pinnedEd25519Keys    map[crypto.SigPubKey]struct{}
}

func (l *link) init(c *Core) error {
	l.core = c
	l.mutex.Lock()
	l.interfaces = make(map[linkInfo]*linkInterface)
	l.mutex.Unlock()
	l.stopped = make(chan struct{})

	if err := l.tcp.init(l); err != nil {
		c.log.Errorln("Failed to start TCP interface")
		return err
	}

	return nil
}

func (l *link) reconfigure() {
	l.tcp.reconfigure()
}

func (l *link) call(uri string, sintf string) error {
	u, err := url.Parse(uri)
	if err != nil {
		return fmt.Errorf("peer %s is not correctly formatted (%s)", uri, err)
	}
	pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
	tcpOpts := tcpOptions{}
	if pubkeys, ok := u.Query()["curve25519"]; ok && len(pubkeys) > 0 {
		tcpOpts.pinnedCurve25519Keys = make(map[crypto.BoxPubKey]struct{})
		for _, pubkey := range pubkeys {
			if boxPub, err := hex.DecodeString(pubkey); err == nil {
				var boxPubKey crypto.BoxPubKey
				copy(boxPubKey[:], boxPub)
				tcpOpts.pinnedCurve25519Keys[boxPubKey] = struct{}{}
			}
		}
	}
	if pubkeys, ok := u.Query()["ed25519"]; ok && len(pubkeys) > 0 {
		tcpOpts.pinnedEd25519Keys = make(map[crypto.SigPubKey]struct{})
		for _, pubkey := range pubkeys {
			if sigPub, err := hex.DecodeString(pubkey); err == nil {
				var sigPubKey crypto.SigPubKey
				copy(sigPubKey[:], sigPub)
				tcpOpts.pinnedEd25519Keys[sigPubKey] = struct{}{}
			}
		}
	}
	switch u.Scheme {
	case "tcp":
		l.tcp.call(u.Host, tcpOpts, sintf)
	case "socks":
		tcpOpts.socksProxyAddr = u.Host
		if u.User != nil {
			tcpOpts.socksProxyAuth = &proxy.Auth{}
			tcpOpts.socksProxyAuth.User = u.User.Username()
			tcpOpts.socksProxyAuth.Password, _ = u.User.Password()
		}
		l.tcp.call(pathtokens[0], tcpOpts, sintf)
	case "tls":
		tcpOpts.upgrade = l.tcp.tls.forDialer
		l.tcp.call(u.Host, tcpOpts, sintf)
	default:
		return errors.New("unknown call scheme: " + u.Scheme)
	}
	return nil
}

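// Illustrative sketch (not part of the original file): the peer URI forms
// accepted by link.call above. Hostnames, ports, credentials and the hex key
// are placeholder example values; the second argument is the optional source
// interface name, left empty here.
func exampleCallPeers(l *link) {
	// Plain TCP peer:
	_ = l.call("tcp://peer.example.com:12345", "")
	// TLS peer, pinned to a specific curve25519 key (placeholder hex):
	_ = l.call("tls://peer.example.com:443?curve25519=0123abcd", "")
	// Peer reached through a SOCKS proxy; the target follows the proxy host as a path element:
	_ = l.call("socks://user:pass@proxy.example.com:1080/peer.example.com:12345", "")
}
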
func (l *link) listen(uri string) error {
	u, err := url.Parse(uri)
	if err != nil {
		return fmt.Errorf("listener %s is not correctly formatted (%s)", uri, err)
	}
	switch u.Scheme {
	case "tcp":
		_, err := l.tcp.listen(u.Host, nil)
		return err
	case "tls":
		_, err := l.tcp.listen(u.Host, l.tcp.tls.forListener)
		return err
	default:
		return errors.New("unknown listen scheme: " + u.Scheme)
	}
}

func (l *link) create(msgIO linkInterfaceMsgIO, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*linkInterface, error) {
	// Technically anything unique would work for names, but let's pick something human readable, just for debugging
	intf := linkInterface{
		name:    name,
		link:    l,
		options: options,
		msgIO:   msgIO,
		info: linkInfo{
			linkType: linkType,
			local:    local,
			remote:   remote,
		},
		incoming: incoming,
		force:    force,
	}
	intf.writer.intf = &intf
	intf.reader.intf = &intf
	intf.reader.err = make(chan error)
	return &intf, nil
}

func (l *link) stop() error {
	close(l.stopped)
	if err := l.tcp.stop(); err != nil {
		return err
	}
	return nil
}

func (intf *linkInterface) handler() error {
	// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
	myLinkPub, myLinkPriv := crypto.NewBoxKeys()
	meta := version_getBaseMetadata()
	meta.box = intf.link.core.boxPub
	meta.sig = intf.link.core.sigPub
	meta.link = *myLinkPub
	metaBytes := meta.encode()
	// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
	var err error
	if !util.FuncTimeout(func() { err = intf.msgIO._sendMetaBytes(metaBytes) }, 30*time.Second) {
		return errors.New("timeout on metadata send")
	}
	if err != nil {
		return err
	}
	if !util.FuncTimeout(func() { metaBytes, err = intf.msgIO._recvMetaBytes() }, 30*time.Second) {
		return errors.New("timeout on metadata recv")
	}
	if err != nil {
		return err
	}
	meta = version_metadata{}
	if !meta.decode(metaBytes) || !meta.check() {
		return errors.New("failed to decode metadata")
	}
	base := version_getBaseMetadata()
	if meta.ver > base.ver || meta.ver == base.ver && meta.minorVer > base.minorVer {
		intf.link.core.log.Errorln("Failed to connect to node: " + intf.name + " version: " + fmt.Sprintf("%d.%d", meta.ver, meta.minorVer))
		return errors.New("failed to connect: wrong version")
	}
	// Check if the remote side matches the keys we expected. This is a bit of a weak
	// check - in future versions we really should check a signature or something like that.
	if pinned := intf.options.pinnedCurve25519Keys; pinned != nil {
		if _, allowed := pinned[meta.box]; !allowed {
			intf.link.core.log.Errorf("Failed to connect to node: %q sent curve25519 key that does not match pinned keys", intf.name)
			return fmt.Errorf("failed to connect: host sent curve25519 key that does not match pinned keys")
		}
	}
	if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
		if _, allowed := pinned[meta.sig]; !allowed {
			intf.link.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name)
			return fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
		}
	}
	// Check if we're authorized to connect to this key / IP
	if intf.incoming && !intf.force && !intf.link.core.peers.isAllowedEncryptionPublicKey(&meta.box) {
		intf.link.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
			strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.box[:]))
		intf.msgIO.close()
		return nil
	}
	// Check if we already have a link to this node
	intf.info.box = meta.box
	intf.info.sig = meta.sig
	intf.link.mutex.Lock()
	if oldIntf, isIn := intf.link.interfaces[intf.info]; isIn {
		intf.link.mutex.Unlock()
		// FIXME we should really return an error and let the caller block instead
		// That lets them do things like close connections on its own, avoid printing a connection message in the first place, etc.
		intf.link.core.log.Debugln("DEBUG: found existing interface for", intf.name)
		intf.msgIO.close()
		if !intf.incoming {
			// Block outgoing connection attempts until the existing connection closes
			<-oldIntf.closed
		}
		return nil
	} else {
		intf.closed = make(chan struct{})
		intf.link.interfaces[intf.info] = intf
		defer func() {
			intf.link.mutex.Lock()
			delete(intf.link.interfaces, intf.info)
			intf.link.mutex.Unlock()
			close(intf.closed)
		}()
		intf.link.core.log.Debugln("DEBUG: registered interface for", intf.name)
	}
	intf.link.mutex.Unlock()
	// Create peer
	shared := crypto.GetSharedKey(myLinkPriv, &meta.link)
	intf.peer = intf.link.core.peers.newPeer(&meta.box, &meta.sig, shared, intf, func() { intf.msgIO.close() })
	if intf.peer == nil {
		return errors.New("failed to create peer")
	}
	defer func() {
		// More cleanup can go here
		intf.link.core.peers.removePeer(intf.peer.port)
	}()
	intf.peer.out = func(msgs [][]byte) {
		intf.writer.sendFrom(intf.peer, msgs, false)
	}
	intf.peer.linkOut = func(bs []byte) {
		intf.writer.sendFrom(intf.peer, [][]byte{bs}, true)
	}
	themAddr := address.AddrForNodeID(crypto.GetNodeID(&intf.info.box))
	themAddrString := net.IP(themAddr[:]).String()
	themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
	intf.link.core.log.Infof("Connected %s: %s, source %s",
		strings.ToUpper(intf.info.linkType), themString, intf.info.local)
	// Start things
	go intf.peer.start()
	intf.reader.Act(nil, intf.reader._read)
	// Wait for the reader to finish
	// TODO find a way to do this without keeping live goroutines around
	done := make(chan struct{})
	defer close(done)
	go func() {
		select {
		case <-intf.link.stopped:
			intf.msgIO.close()
		case <-done:
		}
	}()
	err = <-intf.reader.err
	// TODO don't report an error if it's just a 'use of closed network connection'
	if err != nil {
		intf.link.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
			strings.ToUpper(intf.info.linkType), themString, intf.info.local, err)
	} else {
		intf.link.core.log.Infof("Disconnected %s: %s, source %s",
			strings.ToUpper(intf.info.linkType), themString, intf.info.local)
	}
	return err
}

////////////////////////////////////////////////////////////////////////////////

const (
	sendTime      = 1 * time.Second    // How long to wait before deciding a send is blocked
	keepAliveTime = 2 * time.Second    // How long to wait before sending a keep-alive response if we have no real traffic to send
	stallTime     = 6 * time.Second    // How long to wait for response traffic before deciding the connection has stalled
	closeTime     = 2 * switch_timeout // How long to wait before closing the link
)

// notify the intf that we're currently sending
func (intf *linkInterface) notifySending(size int, isLinkTraffic bool) {
	intf.Act(&intf.writer, func() {
		if !isLinkTraffic {
			intf.inSwitch = false
		}
		intf.sendTimer = time.AfterFunc(sendTime, intf.notifyBlockedSend)
		intf._cancelStallTimer()
	})
}

// called by an AfterFunc if we seem to be blocked in a send syscall for a long time
func (intf *linkInterface) _notifySyscall() {
	intf.link.core.switchTable.Act(intf, func() {
		intf.link.core.switchTable._sendingIn(intf.peer.port)
	})
}

// we just sent something, so cancel any pending timer to send keep-alive traffic
func (intf *linkInterface) _cancelStallTimer() {
	if intf.stallTimer != nil {
		intf.stallTimer.Stop()
		intf.stallTimer = nil
	}
}

// This gets called from a time.AfterFunc, and notifies the switch that we appear
// to have gotten blocked on a write, so the switch should start routing traffic
// through other links, if alternatives exist
func (intf *linkInterface) notifyBlockedSend() {
	intf.Act(nil, func() {
		if intf.sendTimer != nil {
			//As far as we know, we're still trying to send, and the timer fired.
			intf.link.core.switchTable.blockPeer(intf.peer.port)
		}
	})
}

// notify the intf that we've finished sending, returning the peer to the switch
func (intf *linkInterface) notifySent(size int, isLinkTraffic bool) {
	intf.Act(&intf.writer, func() {
		intf.sendTimer.Stop()
		intf.sendTimer = nil
		if !isLinkTraffic {
			intf._notifySwitch()
		}
		if size > 0 && intf.stallTimer == nil {
			intf.stallTimer = time.AfterFunc(stallTime, intf.notifyStalled)
		}
	})
}

// Notify the switch that we're ready for more traffic, assuming we're not in a stalled state
func (intf *linkInterface) _notifySwitch() {
	if !intf.inSwitch {
		if intf.stalled {
			intf.unstalled = false
		} else {
			intf.inSwitch = true
			intf.link.core.switchTable.Act(intf, func() {
				intf.link.core.switchTable._idleIn(intf.peer.port)
			})
		}
	}
}

// Set the peer as stalled, to prevent them from returning to the switch until a read succeeds
func (intf *linkInterface) notifyStalled() {
	intf.Act(nil, func() { // Sent from a time.AfterFunc
		if intf.stallTimer != nil {
			intf.stallTimer.Stop()
			intf.stallTimer = nil
			intf.stalled = true
			intf.link.core.switchTable.blockPeer(intf.peer.port)
		}
	})
}

// reset the close timer
func (intf *linkInterface) notifyReading() {
	intf.Act(&intf.reader, func() {
		if intf.closeTimer != nil {
			intf.closeTimer.Stop()
		}
		intf.closeTimer = time.AfterFunc(closeTime, func() { intf.msgIO.close() })
	})
}

// wake up the link if it was stalled, and (if size > 0) prepare to send keep-alive traffic
func (intf *linkInterface) notifyRead(size int) {
	intf.Act(&intf.reader, func() {
		if intf.stallTimer != nil {
			intf.stallTimer.Stop()
			intf.stallTimer = nil
		}
		intf.stalled = false
		if !intf.unstalled {
			intf._notifySwitch()
			intf.unstalled = true
		}
		if size > 0 && intf.stallTimer == nil {
			intf.stallTimer = time.AfterFunc(keepAliveTime, intf.notifyDoKeepAlive)
		}
	})
}

// We need to send keep-alive traffic now
func (intf *linkInterface) notifyDoKeepAlive() {
	intf.Act(nil, func() { // Sent from a time.AfterFunc
		if intf.stallTimer != nil {
			intf.stallTimer.Stop()
			intf.stallTimer = nil
			intf.writer.sendFrom(nil, [][]byte{nil}, true) // Empty keep-alive traffic
		}
	})
}

////////////////////////////////////////////////////////////////////////////////

type linkWriter struct {
	phony.Inbox
	intf *linkInterface
}

func (w *linkWriter) sendFrom(from phony.Actor, bss [][]byte, isLinkTraffic bool) {
	w.Act(from, func() {
		var size int
		for _, bs := range bss {
			size += len(bs)
		}
		w.intf.notifySending(size, isLinkTraffic)
		// start a timer that will fire if we get stuck in writeMsgs for an oddly long time
		var once sync.Once
		timer := time.AfterFunc(time.Millisecond, func() {
			// 1 ms is kind of arbitrary
			// the rationale is that this should be very long compared to a syscall
			// but it's still short compared to end-to-end latency or human perception
			once.Do(func() {
				w.intf.Act(nil, w.intf._notifySyscall)
			})
		})
		w.intf.msgIO.writeMsgs(bss)
		// Make sure we either stop the timer from doing anything or wait until it's done
		once.Do(func() { timer.Stop() })
		w.intf.notifySent(size, isLinkTraffic)
		// Cleanup
		for _, bs := range bss {
			util.PutBytes(bs)
		}
	})
}

////////////////////////////////////////////////////////////////////////////////

type linkReader struct {
	phony.Inbox
	intf *linkInterface
	err  chan error
}

func (r *linkReader) _read() {
	r.intf.notifyReading()
	msg, err := r.intf.msgIO.readMsg()
	r.intf.notifyRead(len(msg))
	if len(msg) > 0 {
		r.intf.peer.handlePacketFrom(r, msg)
	}
	if err != nil {
		if err != io.EOF {
			r.err <- err
		}
		close(r.err)
		return
	}
	// Now try to read again
	r.Act(nil, r._read)
}

@@ -1,45 +0,0 @@
package yggdrasil

import (
	"errors"
	"net"
)

// Listener waits for incoming sessions
type Listener struct {
	core  *Core
	conn  chan *Conn
	close chan interface{}
}

// Accept blocks until a new incoming session is received
func (l *Listener) Accept() (net.Conn, error) {
	select {
	case c, ok := <-l.conn:
		if !ok {
			return nil, errors.New("listener closed")
		}
		return c, nil
	case <-l.close:
		return nil, errors.New("listener closed")
	}
}

// Close will stop the listener
func (l *Listener) Close() (err error) {
	defer func() {
		recover()
		err = errors.New("already closed")
	}()
	if l.core.router.sessions.listener == l {
		l.core.router.sessions.listener = nil
	}
	close(l.close)
	close(l.conn)
	return nil
}

// Addr returns the address of the listener
func (l *Listener) Addr() net.Addr {
	return &l.core.boxPub
}

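// Illustrative sketch (not part of the original file): a typical Accept loop
// over the Listener defined above, assuming a handleConn helper supplied by
// the caller.
func exampleAcceptLoop(l *Listener, handleConn func(net.Conn)) {
	for {
		conn, err := l.Accept()
		if err != nil {
			// The listener was closed; stop accepting.
			return
		}
		go handleConn(conn)
	}
}
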
@@ -1,208 +0,0 @@
package yggdrasil

import (
	"encoding/json"
	"errors"
	"runtime"
	"strings"
	"time"

	"github.com/Arceliar/phony"
	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
	"github.com/yggdrasil-network/yggdrasil-go/src/version"
)

type nodeinfo struct {
	phony.Inbox
	core       *Core
	myNodeInfo NodeInfoPayload
	callbacks  map[crypto.BoxPubKey]nodeinfoCallback
	cache      map[crypto.BoxPubKey]nodeinfoCached
}

type nodeinfoCached struct {
	payload NodeInfoPayload
	created time.Time
}

type nodeinfoCallback struct {
	call    func(nodeinfo *NodeInfoPayload)
	created time.Time
}

// Represents a session nodeinfo packet.
type nodeinfoReqRes struct {
	SendPermPub crypto.BoxPubKey // Sender's permanent key
	SendCoords  []byte           // Sender's coords
	IsResponse  bool
	NodeInfo    NodeInfoPayload
}

// Initialises the nodeinfo cache/callback maps, and starts a goroutine to keep
// the cache/callback maps clean of stale entries
func (m *nodeinfo) init(core *Core) {
	m.Act(nil, func() {
		m._init(core)
	})
}

func (m *nodeinfo) _init(core *Core) {
	m.core = core
	m.callbacks = make(map[crypto.BoxPubKey]nodeinfoCallback)
	m.cache = make(map[crypto.BoxPubKey]nodeinfoCached)

	m._cleanup()
}

func (m *nodeinfo) _cleanup() {
	for boxPubKey, callback := range m.callbacks {
		if time.Since(callback.created) > time.Minute {
			delete(m.callbacks, boxPubKey)
		}
	}
	for boxPubKey, cache := range m.cache {
		if time.Since(cache.created) > time.Hour {
			delete(m.cache, boxPubKey)
		}
	}
	time.AfterFunc(time.Second*30, func() {
		m.Act(nil, m._cleanup)
	})
}

// Add a callback for a nodeinfo lookup
func (m *nodeinfo) addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
	m.Act(nil, func() {
		m._addCallback(sender, call)
	})
}

func (m *nodeinfo) _addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
	m.callbacks[sender] = nodeinfoCallback{
		created: time.Now(),
		call:    call,
	}
}

// Handles the callback, if there is one
func (m *nodeinfo) _callback(sender crypto.BoxPubKey, nodeinfo NodeInfoPayload) {
	if callback, ok := m.callbacks[sender]; ok {
		callback.call(&nodeinfo)
		delete(m.callbacks, sender)
	}
}

// Get the current node's nodeinfo
func (m *nodeinfo) getNodeInfo() (p NodeInfoPayload) {
	phony.Block(m, func() {
		p = m._getNodeInfo()
	})
	return
}

func (m *nodeinfo) _getNodeInfo() NodeInfoPayload {
	return m.myNodeInfo
}

// Set the current node's nodeinfo
func (m *nodeinfo) setNodeInfo(given interface{}, privacy bool) (err error) {
	phony.Block(m, func() {
		err = m._setNodeInfo(given, privacy)
	})
	return
}

func (m *nodeinfo) _setNodeInfo(given interface{}, privacy bool) error {
	defaults := map[string]interface{}{
		"buildname":     version.BuildName(),
		"buildversion":  version.BuildVersion(),
		"buildplatform": runtime.GOOS,
		"buildarch":     runtime.GOARCH,
	}
	newnodeinfo := make(map[string]interface{})
	if !privacy {
		for k, v := range defaults {
			newnodeinfo[k] = v
		}
	}
	if nodeinfomap, ok := given.(map[string]interface{}); ok {
		for key, value := range nodeinfomap {
			if _, ok := defaults[key]; ok {
				if strvalue, strok := value.(string); strok && strings.EqualFold(strvalue, "null") || value == nil {
					delete(newnodeinfo, key)
				}
				continue
			}
			newnodeinfo[key] = value
		}
	}
	if newjson, err := json.Marshal(newnodeinfo); err == nil {
		if len(newjson) > 16384 {
			return errors.New("NodeInfo exceeds max length of 16384 bytes")
		}
		m.myNodeInfo = newjson
		return nil
	} else {
		return err
	}
}

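// Illustrative sketch (not part of the original file): how a caller might
// populate the node's nodeinfo, matching the rules enforced by _setNodeInfo
// above. The field values are arbitrary examples; setting a default field to
// "null" removes it, and the encoded JSON must stay within 16384 bytes.
func exampleSetNodeInfo(m *nodeinfo) error {
	return m.setNodeInfo(map[string]interface{}{
		"name":      "example-node",
		"contact":   "operator@example.com",
		"buildname": "null", // strip the default buildname field
	}, false)
}
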
|
||||
|
||||
// Add nodeinfo into the cache for a node
|
||||
func (m *nodeinfo) _addCachedNodeInfo(key crypto.BoxPubKey, payload NodeInfoPayload) {
|
||||
m.cache[key] = nodeinfoCached{
|
||||
created: time.Now(),
|
||||
payload: payload,
|
||||
}
|
||||
}
|
||||
|
||||
// Get a nodeinfo entry from the cache
|
||||
func (m *nodeinfo) _getCachedNodeInfo(key crypto.BoxPubKey) (NodeInfoPayload, error) {
|
||||
if nodeinfo, ok := m.cache[key]; ok {
|
||||
return nodeinfo.payload, nil
|
||||
}
|
||||
return NodeInfoPayload{}, errors.New("No cache entry found")
|
||||
}
|
||||
|
||||
// Handles a nodeinfo request/response - called from the router
|
||||
func (m *nodeinfo) handleNodeInfo(from phony.Actor, nodeinfo *nodeinfoReqRes) {
|
||||
m.Act(from, func() {
|
||||
m._handleNodeInfo(nodeinfo)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _handleNodeInfo(nodeinfo *nodeinfoReqRes) {
|
||||
if nodeinfo.IsResponse {
|
||||
m._callback(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
|
||||
m._addCachedNodeInfo(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
|
||||
} else {
|
||||
m._sendNodeInfo(nodeinfo.SendPermPub, nodeinfo.SendCoords, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Send nodeinfo request or response - called from the router
|
||||
func (m *nodeinfo) sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
|
||||
m.Act(nil, func() {
|
||||
m._sendNodeInfo(key, coords, isResponse)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *nodeinfo) _sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
|
||||
table := m.core.switchTable.table.Load().(lookupTable)
|
||||
nodeinfo := nodeinfoReqRes{
|
||||
SendCoords: table.self.getCoords(),
|
||||
IsResponse: isResponse,
|
||||
NodeInfo: m._getNodeInfo(),
|
||||
}
|
||||
bs := nodeinfo.encode()
|
||||
shared := m.core.router.sessions.getSharedKey(&m.core.boxPriv, &key)
|
||||
payload, nonce := crypto.BoxSeal(shared, bs, nil)
|
||||
p := wire_protoTrafficPacket{
|
||||
Coords: coords,
|
||||
ToKey: key,
|
||||
FromKey: m.core.boxPub,
|
||||
Nonce: *nonce,
|
||||
Payload: payload,
|
||||
}
|
||||
packet := p.encode()
|
||||
m.core.router.out(packet)
|
||||
}
|
|
@@ -1,380 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// TODO cleanup, this file is kind of a mess
|
||||
// Commented code should be removed
|
||||
// Live code should be better commented
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
// The peers struct represents peers with an active connection.
|
||||
// Incoming packets are passed to the corresponding peer, which handles them somehow.
|
||||
// In most cases, this involves passing the packet to the handler for outgoing traffic to another peer.
|
||||
// In other cases, it's link protocol traffic used to build the spanning tree, in which case this checks signatures and passes the message along to the switch.
|
||||
type peers struct {
|
||||
core *Core
|
||||
mutex sync.Mutex // Synchronize writes to atomic
|
||||
ports atomic.Value //map[switchPort]*peer, use CoW semantics
|
||||
}
|
||||
|
||||
// Initializes the peers struct.
|
||||
func (ps *peers) init(c *Core) {
|
||||
ps.mutex.Lock()
|
||||
defer ps.mutex.Unlock()
|
||||
ps.putPorts(make(map[switchPort]*peer))
|
||||
ps.core = c
|
||||
}
|
||||
|
||||
func (ps *peers) reconfigure() {
|
||||
// This is where reconfiguration would go, if we had anything to do
|
||||
}
|
||||
|
||||
// Returns true if an incoming peer connection to a key is allowed, either
|
||||
// because the key is in the whitelist or because the whitelist is empty.
|
||||
func (ps *peers) isAllowedEncryptionPublicKey(box *crypto.BoxPubKey) bool {
|
||||
boxstr := hex.EncodeToString(box[:])
|
||||
ps.core.config.Mutex.RLock()
|
||||
defer ps.core.config.Mutex.RUnlock()
|
||||
for _, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
|
||||
if v == boxstr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return len(ps.core.config.Current.AllowedEncryptionPublicKeys) == 0
|
||||
}
|
||||
|
||||
// Adds a key to the whitelist.
|
||||
func (ps *peers) addAllowedEncryptionPublicKey(box string) {
|
||||
ps.core.config.Mutex.RLock()
|
||||
defer ps.core.config.Mutex.RUnlock()
|
||||
ps.core.config.Current.AllowedEncryptionPublicKeys =
|
||||
append(ps.core.config.Current.AllowedEncryptionPublicKeys, box)
|
||||
}
|
||||
|
||||
// Removes a key from the whitelist.
|
||||
func (ps *peers) removeAllowedEncryptionPublicKey(box string) {
|
||||
ps.core.config.Mutex.RLock()
|
||||
defer ps.core.config.Mutex.RUnlock()
|
||||
for k, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
|
||||
if v == box {
|
||||
ps.core.config.Current.AllowedEncryptionPublicKeys =
|
||||
append(ps.core.config.Current.AllowedEncryptionPublicKeys[:k],
|
||||
ps.core.config.Current.AllowedEncryptionPublicKeys[k+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Gets the whitelist of allowed keys for incoming connections.
|
||||
func (ps *peers) getAllowedEncryptionPublicKeys() []string {
|
||||
ps.core.config.Mutex.RLock()
|
||||
defer ps.core.config.Mutex.RUnlock()
|
||||
return ps.core.config.Current.AllowedEncryptionPublicKeys
|
||||
}
|
||||
|
||||
// Atomically gets a map[switchPort]*peer of known peers.
|
||||
func (ps *peers) getPorts() map[switchPort]*peer {
|
||||
return ps.ports.Load().(map[switchPort]*peer)
|
||||
}
|
||||
|
||||
// Stores a map[switchPort]*peer (note that you should take a mutex before store operations to avoid conflicts with other goroutines attempting to read/change/store at the same time).
|
||||
func (ps *peers) putPorts(ports map[switchPort]*peer) {
|
||||
ps.ports.Store(ports)
|
||||
}
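// For exposition only: a sketch of the copy-on-write update pattern described
// above, as used by newPeer and removePeer below. Readers call getPorts without
// locking; writers hold ps.mutex, copy the map, mutate the copy, then store it.
//
//	ps.mutex.Lock()
//	defer ps.mutex.Unlock()
//	newPorts := make(map[switchPort]*peer)
//	for k, v := range ps.getPorts() {
//		newPorts[k] = v
//	}
//	// ... add or delete entries in newPorts ...
//	ps.putPorts(newPorts)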
|
||||
|
||||
// Information known about a peer, including their box/sig keys, precomputed shared keys (static and ephemeral) and a handler for their outgoing traffic
|
||||
type peer struct {
|
||||
phony.Inbox
|
||||
core *Core
|
||||
intf *linkInterface
|
||||
port switchPort
|
||||
box crypto.BoxPubKey
|
||||
sig crypto.SigPubKey
|
||||
shared crypto.BoxSharedKey
|
||||
linkShared crypto.BoxSharedKey
|
||||
endpoint string
|
||||
firstSeen time.Time // To track uptime for getPeers
|
||||
linkOut func([]byte) // used for protocol traffic (bypasses the switch)
|
||||
dinfo *dhtInfo // used to keep the DHT working
|
||||
out func([][]byte) // Set up by whatever created the peers struct, used to send packets to other nodes
|
||||
done (chan struct{}) // closed to exit the linkLoop
|
||||
close func() // Called when a peer is removed, to close the underlying connection, or via admin api
|
||||
// The below aren't actually useful internally, they're just gathered for getPeers statistics
|
||||
bytesSent uint64
|
||||
bytesRecvd uint64
|
||||
}
|
||||
|
||||
// Creates a new peer with the specified box, sig, and linkShared keys, using the lowest unoccupied port number.
|
||||
func (ps *peers) newPeer(box *crypto.BoxPubKey, sig *crypto.SigPubKey, linkShared *crypto.BoxSharedKey, intf *linkInterface, closer func()) *peer {
|
||||
now := time.Now()
|
||||
p := peer{box: *box,
|
||||
sig: *sig,
|
||||
shared: *crypto.GetSharedKey(&ps.core.boxPriv, box),
|
||||
linkShared: *linkShared,
|
||||
firstSeen: now,
|
||||
done: make(chan struct{}),
|
||||
close: closer,
|
||||
core: ps.core,
|
||||
intf: intf,
|
||||
}
|
||||
ps.mutex.Lock()
|
||||
defer ps.mutex.Unlock()
|
||||
oldPorts := ps.getPorts()
|
||||
newPorts := make(map[switchPort]*peer)
|
||||
for k, v := range oldPorts {
|
||||
newPorts[k] = v
|
||||
}
|
||||
for idx := switchPort(0); true; idx++ {
|
||||
if _, isIn := newPorts[idx]; !isIn {
|
||||
p.port = switchPort(idx)
|
||||
newPorts[p.port] = &p
|
||||
break
|
||||
}
|
||||
}
|
||||
ps.putPorts(newPorts)
|
||||
return &p
|
||||
}
|
||||
|
||||
// Removes a peer for a given port, if one exists.
|
||||
func (ps *peers) removePeer(port switchPort) {
|
||||
if port == 0 {
|
||||
return
|
||||
} // Can't remove self peer
|
||||
phony.Block(&ps.core.router, func() {
|
||||
ps.core.switchTable.forgetPeer(port)
|
||||
})
|
||||
ps.mutex.Lock()
|
||||
oldPorts := ps.getPorts()
|
||||
p, isIn := oldPorts[port]
|
||||
newPorts := make(map[switchPort]*peer)
|
||||
for k, v := range oldPorts {
|
||||
newPorts[k] = v
|
||||
}
|
||||
delete(newPorts, port)
|
||||
ps.putPorts(newPorts)
|
||||
ps.mutex.Unlock()
|
||||
if isIn {
|
||||
if p.close != nil {
|
||||
p.close()
|
||||
}
|
||||
close(p.done)
|
||||
}
|
||||
}
|
||||
|
||||
// Sends a notification to each peer that they should send a new switch message.
|
||||
// Mainly called by the switch after an update.
|
||||
func (ps *peers) sendSwitchMsgs(from phony.Actor) {
|
||||
ports := ps.getPorts()
|
||||
for _, p := range ports {
|
||||
if p.port == 0 {
|
||||
continue
|
||||
}
|
||||
p.Act(from, p._sendSwitchMsg)
|
||||
}
|
||||
}
|
||||
|
||||
// This must be launched in a separate goroutine by whatever sets up the peer struct.
|
||||
// It handles link protocol traffic.
|
||||
func (p *peer) start() {
|
||||
var updateDHT func()
|
||||
updateDHT = func() {
|
||||
phony.Block(p, func() {
|
||||
select {
|
||||
case <-p.done:
|
||||
default:
|
||||
p._updateDHT()
|
||||
time.AfterFunc(time.Second, updateDHT)
|
||||
}
|
||||
})
|
||||
}
|
||||
updateDHT()
|
||||
// Just for good measure, immediately send a switch message to this peer when we start
|
||||
p.Act(nil, p._sendSwitchMsg)
|
||||
}
|
||||
|
||||
func (p *peer) _updateDHT() {
|
||||
if p.dinfo != nil {
|
||||
p.core.router.insertPeer(p, p.dinfo)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *peer) handlePacketFrom(from phony.Actor, packet []byte) {
|
||||
p.Act(from, func() {
|
||||
p._handlePacket(packet)
|
||||
})
|
||||
}
|
||||
|
||||
// Called to handle incoming packets.
|
||||
// Passes the packet to a handler for that packet type.
|
||||
func (p *peer) _handlePacket(packet []byte) {
|
||||
// FIXME this is off by stream padding and msg length overhead, should be done in tcp.go
|
||||
p.bytesRecvd += uint64(len(packet))
|
||||
pType, pTypeLen := wire_decode_uint64(packet)
|
||||
if pTypeLen == 0 {
|
||||
return
|
||||
}
|
||||
switch pType {
|
||||
case wire_Traffic:
|
||||
p._handleTraffic(packet)
|
||||
case wire_ProtocolTraffic:
|
||||
p._handleTraffic(packet)
|
||||
case wire_LinkProtocolTraffic:
|
||||
p._handleLinkTraffic(packet)
|
||||
default:
|
||||
util.PutBytes(packet)
|
||||
}
|
||||
}
|
||||
|
||||
// Called to handle traffic or protocolTraffic packets.
|
||||
// In either case, this reads from the coords of the packet header, does a switch lookup, and forwards to the next node.
|
||||
func (p *peer) _handleTraffic(packet []byte) {
|
||||
table := p.core.switchTable.getTable()
|
||||
if _, isIn := table.elems[p.port]; !isIn && p.port != 0 {
|
||||
// Drop traffic if the peer isn't in the switch
|
||||
return
|
||||
}
|
||||
p.core.switchTable.packetInFrom(p, packet)
|
||||
}
|
||||
|
||||
func (p *peer) sendPacketsFrom(from phony.Actor, packets [][]byte) {
|
||||
p.Act(from, func() {
|
||||
p._sendPackets(packets)
|
||||
})
|
||||
}
|
||||
|
||||
// This just calls p.out(packets) for now.
|
||||
func (p *peer) _sendPackets(packets [][]byte) {
|
||||
// Is there ever a case where something more complicated is needed?
|
||||
// What if p.out blocks?
|
||||
var size int
|
||||
for _, packet := range packets {
|
||||
size += len(packet)
|
||||
}
|
||||
p.bytesSent += uint64(size)
|
||||
p.out(packets)
|
||||
}
|
||||
|
||||
// This wraps the packet in the inner (ephemeral) and outer (permanent) crypto layers.
|
||||
// It sends it to p.linkOut, which bypasses the usual packet queues.
|
||||
func (p *peer) _sendLinkPacket(packet []byte) {
|
||||
innerPayload, innerNonce := crypto.BoxSeal(&p.linkShared, packet, nil)
|
||||
innerLinkPacket := wire_linkProtoTrafficPacket{
|
||||
Nonce: *innerNonce,
|
||||
Payload: innerPayload,
|
||||
}
|
||||
outerPayload := innerLinkPacket.encode()
|
||||
bs, nonce := crypto.BoxSeal(&p.shared, outerPayload, nil)
|
||||
linkPacket := wire_linkProtoTrafficPacket{
|
||||
Nonce: *nonce,
|
||||
Payload: bs,
|
||||
}
|
||||
packet = linkPacket.encode()
|
||||
p.linkOut(packet)
|
||||
}
|
||||
|
||||
// Decrypts the outer (permanent) and inner (ephemeral) crypto layers on link traffic.
|
||||
// Identifies the link traffic type and calls the appropriate handler.
|
||||
func (p *peer) _handleLinkTraffic(bs []byte) {
|
||||
packet := wire_linkProtoTrafficPacket{}
|
||||
if !packet.decode(bs) {
|
||||
return
|
||||
}
|
||||
outerPayload, isOK := crypto.BoxOpen(&p.shared, packet.Payload, &packet.Nonce)
|
||||
if !isOK {
|
||||
return
|
||||
}
|
||||
innerPacket := wire_linkProtoTrafficPacket{}
|
||||
if !innerPacket.decode(outerPayload) {
|
||||
return
|
||||
}
|
||||
payload, isOK := crypto.BoxOpen(&p.linkShared, innerPacket.Payload, &innerPacket.Nonce)
|
||||
if !isOK {
|
||||
return
|
||||
}
|
||||
pType, pTypeLen := wire_decode_uint64(payload)
|
||||
if pTypeLen == 0 {
|
||||
return
|
||||
}
|
||||
switch pType {
|
||||
case wire_SwitchMsg:
|
||||
p._handleSwitchMsg(payload)
|
||||
default:
|
||||
util.PutBytes(bs)
|
||||
}
|
||||
}
|
||||
|
||||
// Gets a switchMsg from the switch, adds signed next-hop info for this peer, and sends it to them.
|
||||
func (p *peer) _sendSwitchMsg() {
|
||||
msg := p.core.switchTable.getMsg()
|
||||
if msg == nil {
|
||||
return
|
||||
}
|
||||
bs := getBytesForSig(&p.sig, msg)
|
||||
msg.Hops = append(msg.Hops, switchMsgHop{
|
||||
Port: p.port,
|
||||
Next: p.sig,
|
||||
Sig: *crypto.Sign(&p.core.sigPriv, bs),
|
||||
})
|
||||
packet := msg.encode()
|
||||
p._sendLinkPacket(packet)
|
||||
}
|
||||
|
||||
// Handles a switchMsg from the peer, checking signatures and passing good messages to the switch.
|
||||
// Also creates a dhtInfo struct and arranges for it to be added to the dht (this is how dht bootstrapping begins).
|
||||
func (p *peer) _handleSwitchMsg(packet []byte) {
|
||||
var msg switchMsg
|
||||
if !msg.decode(packet) {
|
||||
return
|
||||
}
|
||||
if len(msg.Hops) < 1 {
|
||||
p.core.peers.removePeer(p.port)
|
||||
}
|
||||
var loc switchLocator
|
||||
prevKey := msg.Root
|
||||
for idx, hop := range msg.Hops {
|
||||
// Check signatures and collect coords for dht
|
||||
sigMsg := msg
|
||||
sigMsg.Hops = msg.Hops[:idx]
|
||||
loc.coords = append(loc.coords, hop.Port)
|
||||
bs := getBytesForSig(&hop.Next, &sigMsg)
|
||||
if !crypto.Verify(&prevKey, bs, &hop.Sig) {
|
||||
p.core.peers.removePeer(p.port)
|
||||
}
|
||||
prevKey = hop.Next
|
||||
}
|
||||
p.core.switchTable.handleMsg(&msg, p.port)
|
||||
if !p.core.switchTable.checkRoot(&msg) {
|
||||
// Bad switch message
|
||||
p.dinfo = nil
|
||||
return
|
||||
}
|
||||
// Pass a message to the dht informing it that this peer (still) exists
|
||||
loc.coords = loc.coords[:len(loc.coords)-1]
|
||||
p.dinfo = &dhtInfo{
|
||||
key: p.box,
|
||||
coords: loc.getCoords(),
|
||||
}
|
||||
p._updateDHT()
|
||||
}
|
||||
|
||||
// This generates the bytes that we sign or check the signature of for a switchMsg.
|
||||
// It begins with the next node's key, followed by the root and the timestamp, followed by coords being advertised to the next node.
|
||||
func getBytesForSig(next *crypto.SigPubKey, msg *switchMsg) []byte {
|
||||
var loc switchLocator
|
||||
for _, hop := range msg.Hops {
|
||||
loc.coords = append(loc.coords, hop.Port)
|
||||
}
|
||||
bs := append([]byte(nil), next[:]...)
|
||||
bs = append(bs, msg.Root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(msg.TStamp))...)
|
||||
bs = append(bs, wire_encode_coords(loc.getCoords())...)
|
||||
return bs
|
||||
}
|
|
@@ -1,254 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// This part does most of the work to handle packets to/from yourself
|
||||
// It also manages crypto and dht info
|
||||
// TODO clean up old/unused code, maybe improve comments on whatever is left
|
||||
|
||||
// Send:
|
||||
// Receive a packet from the adapter
|
||||
// Look up session (if none exists, trigger a search)
|
||||
// Hand off to session (which encrypts, etc)
|
||||
// Session will pass it back to router.out, which hands it off to the self peer
|
||||
// The self peer triggers a lookup to find which peer to send to next
|
||||
// And then passes it to that peer's peer.out function
|
||||
// The peer.out function sends it over the wire to the matching peer
|
||||
|
||||
// Recv:
|
||||
// A packet comes in off the wire, and goes to a peer.handlePacket
|
||||
// The peer does a lookup, sees no better peer than the self
|
||||
// Hands it to the self peer.out, which passes it to router.in
|
||||
// If it's dht/search/etc. traffic, the router passes it to that part
|
||||
// If it's an encapsulated IPv6 packet, the router looks up the session for it
|
||||
// The packet is passed to the session, which decrypts it and hands it back via router.recvPacket
|
||||
// The router then runs some sanity checks before passing it to the adapter
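// In short (an illustrative summary of the steps above, not literal call chains):
//   send: adapter -> session (encrypt) -> router.out -> self peer -> switch lookup -> peer.out -> wire
//   recv: wire -> peer.handlePacket -> switch lookup -> self peer -> router.in -> session (decrypt) -> adapter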
|
||||
|
||||
import (
|
||||
//"bytes"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
// The router struct has channels to/from the adapter device and a self peer (0), which is how messages are passed between this node and the peers/switch layer.
|
||||
// The router's phony.Inbox goroutine is responsible for managing all information related to the dht, searches, and crypto sessions.
|
||||
type router struct {
|
||||
phony.Inbox
|
||||
core *Core
|
||||
addr address.Address
|
||||
subnet address.Subnet
|
||||
out func([]byte) // packets we're sending to the network, link to peer's "in"
|
||||
dht dht
|
||||
nodeinfo nodeinfo
|
||||
searches searches
|
||||
sessions sessions
|
||||
}
|
||||
|
||||
// Initializes the router struct, which includes setting up the self peer and initializing nodeinfo, the dht, searches and sessions.
|
||||
func (r *router) init(core *Core) {
|
||||
r.core = core
|
||||
r.addr = *address.AddrForNodeID(&r.dht.nodeID)
|
||||
r.subnet = *address.SubnetForNodeID(&r.dht.nodeID)
|
||||
self := linkInterface{
|
||||
name: "(self)",
|
||||
info: linkInfo{
|
||||
local: "(self)",
|
||||
remote: "(self)",
|
||||
linkType: "self",
|
||||
},
|
||||
}
|
||||
p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub, &crypto.BoxSharedKey{}, &self, nil)
|
||||
p.out = func(packets [][]byte) { r.handlePackets(p, packets) }
|
||||
r.out = func(bs []byte) { p.handlePacketFrom(r, bs) }
|
||||
r.nodeinfo.init(r.core)
|
||||
r.core.config.Mutex.RLock()
|
||||
r.nodeinfo.setNodeInfo(r.core.config.Current.NodeInfo, r.core.config.Current.NodeInfoPrivacy)
|
||||
r.core.config.Mutex.RUnlock()
|
||||
r.dht.init(r)
|
||||
r.searches.init(r)
|
||||
r.sessions.init(r)
|
||||
}
|
||||
|
||||
// Reconfigures the router and any child modules. This should only ever be run
|
||||
// by the router actor.
|
||||
func (r *router) reconfigure() {
|
||||
// Reconfigure the router
|
||||
current := r.core.config.GetCurrent()
|
||||
r.core.log.Println("Reloading NodeInfo...")
|
||||
if err := r.nodeinfo.setNodeInfo(current.NodeInfo, current.NodeInfoPrivacy); err != nil {
|
||||
r.core.log.Errorln("Error reloading NodeInfo:", err)
|
||||
} else {
|
||||
r.core.log.Infoln("NodeInfo updated")
|
||||
}
|
||||
// Reconfigure children
|
||||
r.dht.reconfigure()
|
||||
r.searches.reconfigure()
|
||||
r.sessions.reconfigure()
|
||||
}
|
||||
|
||||
// Starts the doMaintenance goroutine.
|
||||
func (r *router) start() error {
|
||||
r.core.log.Infoln("Starting router")
|
||||
go r.doMaintenance()
|
||||
return nil
|
||||
}
|
||||
|
||||
// In practice, the switch will call this with 1 packet
|
||||
func (r *router) handlePackets(from phony.Actor, packets [][]byte) {
|
||||
r.Act(from, func() {
|
||||
for _, packet := range packets {
|
||||
r._handlePacket(packet)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Insert a peer info into the dht, TODO? make the dht a separate actor
|
||||
func (r *router) insertPeer(from phony.Actor, info *dhtInfo) {
|
||||
r.Act(from, func() {
|
||||
r.dht.insertPeer(info)
|
||||
})
|
||||
}
|
||||
|
||||
// Reset sessions and DHT after the switch sees our coords change
|
||||
func (r *router) reset(from phony.Actor) {
|
||||
r.Act(from, func() {
|
||||
r.sessions.reset()
|
||||
r.dht.reset()
|
||||
})
|
||||
}
|
||||
|
||||
// TODO remove reconfigure so this is just a ticker loop
|
||||
// and then find something better than a ticker loop to schedule things...
|
||||
func (r *router) doMaintenance() {
|
||||
phony.Block(r, func() {
|
||||
// Any periodic maintenance stuff goes here
|
||||
r.core.switchTable.doMaintenance()
|
||||
r.dht.doMaintenance()
|
||||
r.sessions.cleanup()
|
||||
})
|
||||
time.AfterFunc(time.Second, r.doMaintenance)
|
||||
}
|
||||
|
||||
// Checks incoming traffic type and passes it to the appropriate handler.
|
||||
func (r *router) _handlePacket(packet []byte) {
|
||||
pType, pTypeLen := wire_decode_uint64(packet)
|
||||
if pTypeLen == 0 {
|
||||
return
|
||||
}
|
||||
switch pType {
|
||||
case wire_Traffic:
|
||||
r._handleTraffic(packet)
|
||||
case wire_ProtocolTraffic:
|
||||
r._handleProto(packet)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Handles incoming traffic, i.e. encapsulated ordinary IPv6 packets.
|
||||
// Passes them to the crypto session worker to be decrypted and sent to the adapter.
|
||||
func (r *router) _handleTraffic(packet []byte) {
|
||||
defer util.PutBytes(packet)
|
||||
p := wire_trafficPacket{}
|
||||
if !p.decode(packet) {
|
||||
return
|
||||
}
|
||||
sinfo, isIn := r.sessions.getSessionForHandle(&p.Handle)
|
||||
if !isIn {
|
||||
util.PutBytes(p.Payload)
|
||||
return
|
||||
}
|
||||
sinfo.recv(r, &p)
|
||||
}
|
||||
|
||||
// Handles protocol traffic by decrypting it, checking its type, and passing it to the appropriate handler for that traffic type.
|
||||
func (r *router) _handleProto(packet []byte) {
|
||||
// First parse the packet
|
||||
p := wire_protoTrafficPacket{}
|
||||
if !p.decode(packet) {
|
||||
return
|
||||
}
|
||||
// Now try to open the payload
|
||||
var sharedKey *crypto.BoxSharedKey
|
||||
if p.ToKey == r.core.boxPub {
|
||||
// Try to open using our permanent key
|
||||
sharedKey = r.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
bs, isOK := crypto.BoxOpen(sharedKey, p.Payload, &p.Nonce)
|
||||
if !isOK {
|
||||
return
|
||||
}
|
||||
// Now do something with the bytes in bs...
|
||||
// send dht messages to dht, sessionRefresh to sessions, data to adapter...
|
||||
// For data, should check that key and IP match...
|
||||
bsType, bsTypeLen := wire_decode_uint64(bs)
|
||||
if bsTypeLen == 0 {
|
||||
return
|
||||
}
|
||||
switch bsType {
|
||||
case wire_SessionPing:
|
||||
r._handlePing(bs, &p.FromKey)
|
||||
case wire_SessionPong:
|
||||
r._handlePong(bs, &p.FromKey)
|
||||
case wire_NodeInfoRequest:
|
||||
fallthrough
|
||||
case wire_NodeInfoResponse:
|
||||
r._handleNodeInfo(bs, &p.FromKey)
|
||||
case wire_DHTLookupRequest:
|
||||
r._handleDHTReq(bs, &p.FromKey)
|
||||
case wire_DHTLookupResponse:
|
||||
r._handleDHTRes(bs, &p.FromKey)
|
||||
default:
|
||||
util.PutBytes(packet)
|
||||
}
|
||||
}
|
||||
|
||||
// Decodes session pings from wire format and passes them to sessions.handlePing where they either create or update a session.
|
||||
func (r *router) _handlePing(bs []byte, fromKey *crypto.BoxPubKey) {
|
||||
ping := sessionPing{}
|
||||
if !ping.decode(bs) {
|
||||
return
|
||||
}
|
||||
ping.SendPermPub = *fromKey
|
||||
r.sessions.handlePing(&ping)
|
||||
}
|
||||
|
||||
// Handles session pongs (which are really pings with an extra flag to prevent acknowledgement).
|
||||
func (r *router) _handlePong(bs []byte, fromKey *crypto.BoxPubKey) {
|
||||
r._handlePing(bs, fromKey)
|
||||
}
|
||||
|
||||
// Decodes dht requests and passes them to dht.handleReq to trigger a lookup/response.
|
||||
func (r *router) _handleDHTReq(bs []byte, fromKey *crypto.BoxPubKey) {
|
||||
req := dhtReq{}
|
||||
if !req.decode(bs) {
|
||||
return
|
||||
}
|
||||
req.Key = *fromKey
|
||||
r.dht.handleReq(&req)
|
||||
}
|
||||
|
||||
// Decodes dht responses and passes them to dht.handleRes to update the DHT table and further pass them to the search code (if applicable).
|
||||
func (r *router) _handleDHTRes(bs []byte, fromKey *crypto.BoxPubKey) {
|
||||
res := dhtRes{}
|
||||
if !res.decode(bs) {
|
||||
return
|
||||
}
|
||||
res.Key = *fromKey
|
||||
r.dht.handleRes(&res)
|
||||
}
|
||||
|
||||
// Decodes nodeinfo requests/responses and passes them to the nodeinfo module
|
||||
func (r *router) _handleNodeInfo(bs []byte, fromKey *crypto.BoxPubKey) {
|
||||
req := nodeinfoReqRes{}
|
||||
if !req.decode(bs) {
|
||||
return
|
||||
}
|
||||
req.SendPermPub = *fromKey
|
||||
r.nodeinfo.handleNodeInfo(r, &req)
|
||||
}
|
|
@@ -1,272 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// This thing manages search packets
|
||||
|
||||
// The basic idea is as follows:
|
||||
// We may know a NodeID (with a mask) and want to connect
|
||||
// We begin a search by sending a dht lookup to ourself
|
||||
// Each time a node responds, we sort the results and filter to only include useful nodes
|
||||
// We then periodically send a packet to the first node from the list (after re-filtering)
|
||||
// This happens in parallel for each node that replies
|
||||
// Meanwhile, we keep a list of the (up to) 16 closest nodes to the destination that we've visited
|
||||
// We only consider an unvisited node useful if either the list isn't full or the unvisited node is closer to the destination than the furthest node on the list
|
||||
// That gives the search some chance to recover if it hits a dead end where a node doesn't know everyone it should
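// For exposition only: a hypothetical helper expressing the "useful node" test
// described above, assuming dht_ordered(a, b, c) reports whether b falls between
// a and c in keyspace. It mirrors the checks in getAllowedInfos and checkDHTRes below.
//
//	func searchTargetIsUseful(dest *crypto.NodeID, candidate *crypto.NodeID, visited []*crypto.NodeID) bool {
//		if len(visited) < search_MAX_RESULTS {
//			return true // the visited list isn't full yet
//		}
//		furthest := visited[len(visited)-1] // visited is kept sorted, closest first
//		return dht_ordered(dest, candidate, furthest)
//	}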
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
)
|
||||
|
||||
// This defines the time after which we time out a search (so it can restart).
|
||||
const search_RETRY_TIME = 3 * time.Second
|
||||
const search_STEP_TIME = time.Second
|
||||
const search_MAX_RESULTS = dht_lookup_size
|
||||
|
||||
// Information about an ongoing search.
|
||||
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
|
||||
type searchInfo struct {
|
||||
searches *searches
|
||||
dest crypto.NodeID
|
||||
mask crypto.NodeID
|
||||
time time.Time
|
||||
visited []*crypto.NodeID // Closest addresses visited so far
|
||||
callback func(*sessionInfo, error)
|
||||
// TODO context.Context for timeout and cancellation
|
||||
send uint64 // log number of requests sent
|
||||
recv uint64 // log number of responses received
|
||||
}
|
||||
|
||||
// This stores a map of active searches.
|
||||
type searches struct {
|
||||
router *router
|
||||
searches map[crypto.NodeID]*searchInfo
|
||||
}
|
||||
|
||||
// Initializes the searches struct.
|
||||
func (s *searches) init(r *router) {
|
||||
s.router = r
|
||||
s.searches = make(map[crypto.NodeID]*searchInfo)
|
||||
}
|
||||
|
||||
func (s *searches) reconfigure() {
|
||||
// This is where reconfiguration would go, if we had anything to do
|
||||
}
|
||||
|
||||
// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
|
||||
func (s *searches) createSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
|
||||
info := searchInfo{
|
||||
searches: s,
|
||||
dest: *dest,
|
||||
mask: *mask,
|
||||
time: time.Now(),
|
||||
callback: callback,
|
||||
}
|
||||
s.searches[*dest] = &info
|
||||
return &info
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Checks if there's an ongoing search related to a dhtRes.
|
||||
// If there is, it adds the response info to the search and triggers a new search step.
|
||||
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
|
||||
func (sinfo *searchInfo) handleDHTRes(res *dhtRes) {
|
||||
if nfo := sinfo.searches.searches[sinfo.dest]; nfo != sinfo {
|
||||
return // already done
|
||||
}
|
||||
if res != nil {
|
||||
sinfo.recv++
|
||||
if sinfo.checkDHTRes(res) {
|
||||
return // Search finished successfully
|
||||
}
|
||||
// Use results to start an additional search thread
|
||||
infos := append([]*dhtInfo(nil), res.Infos...)
|
||||
infos = sinfo.getAllowedInfos(infos)
|
||||
if len(infos) > 0 {
|
||||
sinfo.continueSearch(infos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pops the closest node to the destination (in keyspace) off of the given list of infos and sends it a dht ping.
|
||||
// Cleanup of searches that have gone too long without a response is handled by the timer set up in startSearch.
|
||||
func (sinfo *searchInfo) doSearchStep(infos []*dhtInfo) {
|
||||
if len(infos) > 0 {
|
||||
// Send to the next search target
|
||||
next := infos[0]
|
||||
rq := dhtReqKey{next.key, sinfo.dest}
|
||||
sinfo.searches.router.dht.addCallback(&rq, sinfo.handleDHTRes)
|
||||
sinfo.searches.router.dht.ping(next, &sinfo.dest)
|
||||
sinfo.send++
|
||||
}
|
||||
}
|
||||
|
||||
// Get a list of search targets that are close enough to the destination to try
|
||||
// Requires an initial list as input
|
||||
func (sinfo *searchInfo) getAllowedInfos(infos []*dhtInfo) []*dhtInfo {
|
||||
var temp []*dhtInfo
|
||||
for _, info := range infos {
|
||||
if false && len(sinfo.visited) < search_MAX_RESULTS {
|
||||
// We're not full on results yet, so don't block anything yet
|
||||
} else if !dht_ordered(&sinfo.dest, info.getNodeID(), sinfo.visited[len(sinfo.visited)-1]) {
|
||||
// Too far away
|
||||
continue
|
||||
}
|
||||
var known bool
|
||||
for _, nfo := range sinfo.visited {
|
||||
if *nfo == *info.getNodeID() {
|
||||
known = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !known {
|
||||
temp = append(temp, info)
|
||||
}
|
||||
}
|
||||
infos = append(infos[:0], temp...) // restrict to only the allowed infos
|
||||
sort.SliceStable(infos, func(i, j int) bool {
|
||||
// Should return true if i is closer to the destination than j
|
||||
return dht_ordered(&sinfo.dest, infos[i].getNodeID(), infos[j].getNodeID())
|
||||
}) // Sort infos to start with the closest
|
||||
if len(infos) > search_MAX_RESULTS {
|
||||
infos = infos[:search_MAX_RESULTS] // Limit max number of infos
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
// Run doSearchStep and schedule another continueSearch to happen after search_STEP_TIME.
|
||||
// Must not be called with an empty list of infos
|
||||
func (sinfo *searchInfo) continueSearch(infos []*dhtInfo) {
|
||||
sinfo.doSearchStep(infos)
|
||||
infos = infos[1:] // Remove the node we just tried
|
||||
// In case there's no response, try the next node in infos later
|
||||
time.AfterFunc(search_STEP_TIME, func() {
|
||||
sinfo.searches.router.Act(nil, func() {
|
||||
// FIXME this keeps the search alive forever if not for the searches map, fix that
|
||||
newSearchInfo := sinfo.searches.searches[sinfo.dest]
|
||||
if newSearchInfo != sinfo {
|
||||
return
|
||||
}
|
||||
// Get good infos here instead of at the top, to make sure we can always start things off with a continueSearch call to ourself
|
||||
infos = sinfo.getAllowedInfos(infos)
|
||||
if len(infos) > 0 {
|
||||
sinfo.continueSearch(infos)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Initially start a search
|
||||
func (sinfo *searchInfo) startSearch() {
|
||||
loc := sinfo.searches.router.core.switchTable.getLocator()
|
||||
var infos []*dhtInfo
|
||||
infos = append(infos, &dhtInfo{
|
||||
key: sinfo.searches.router.core.boxPub,
|
||||
coords: loc.getCoords(),
|
||||
})
|
||||
// Start the search by asking ourself, useful if we're the destination
|
||||
sinfo.continueSearch(infos)
|
||||
// Start a timer to clean up the search if everything times out
|
||||
var cleanupFunc func()
|
||||
cleanupFunc = func() {
|
||||
sinfo.searches.router.Act(nil, func() {
|
||||
// FIXME this keeps the search alive forever if not for the searches map, fix that
|
||||
newSearchInfo := sinfo.searches.searches[sinfo.dest]
|
||||
if newSearchInfo != sinfo {
|
||||
return
|
||||
}
|
||||
elapsed := time.Since(sinfo.time)
|
||||
if elapsed > search_RETRY_TIME {
|
||||
// cleanup
|
||||
delete(sinfo.searches.searches, sinfo.dest)
|
||||
sinfo.searches.router.core.log.Debugln("search timeout:", &sinfo.dest, sinfo.send, sinfo.recv)
|
||||
sinfo.callback(nil, errors.New("search reached dead end"))
|
||||
return
|
||||
}
|
||||
time.AfterFunc(search_RETRY_TIME-elapsed, cleanupFunc)
|
||||
})
|
||||
}
|
||||
time.AfterFunc(search_RETRY_TIME, cleanupFunc)
|
||||
}
|
||||
|
||||
// Calls createSearch, and initializes the iterative search parts of the struct before returning it.
|
||||
func (s *searches) newIterSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
|
||||
sinfo := s.createSearch(dest, mask, callback)
|
||||
sinfo.visited = append(sinfo.visited, &s.router.dht.nodeID)
|
||||
return sinfo
|
||||
}
|
||||
|
||||
// Checks if a dhtRes is good (called by handleDHTRes).
|
||||
// If the response is from the target, get/create a session, trigger a session ping, and return true.
|
||||
// Otherwise return false.
|
||||
func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
|
||||
from := dhtInfo{key: res.Key, coords: res.Coords}
|
||||
them := from.getNodeID()
|
||||
var known bool
|
||||
for _, v := range sinfo.visited {
|
||||
if *v == *them {
|
||||
known = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !known {
|
||||
if len(sinfo.visited) < search_MAX_RESULTS || dht_ordered(&sinfo.dest, them, sinfo.visited[len(sinfo.visited)-1]) {
|
||||
// Closer to the destination than the threshold, so update visited
|
||||
sinfo.searches.router.core.log.Debugln("Updating search:", &sinfo.dest, them, sinfo.send, sinfo.recv)
|
||||
sinfo.visited = append(sinfo.visited, them)
|
||||
sort.SliceStable(sinfo.visited, func(i, j int) bool {
|
||||
// Should return true if i is closer to the destination than j
|
||||
return dht_ordered(&sinfo.dest, sinfo.visited[i], sinfo.visited[j])
|
||||
}) // Sort infos to start with the closest
|
||||
if len(sinfo.visited) > search_MAX_RESULTS {
|
||||
sinfo.visited = sinfo.visited[:search_MAX_RESULTS]
|
||||
}
|
||||
sinfo.time = time.Now()
|
||||
}
|
||||
}
|
||||
var destMasked crypto.NodeID
|
||||
var themMasked crypto.NodeID
|
||||
for idx := 0; idx < crypto.NodeIDLen; idx++ {
|
||||
destMasked[idx] = sinfo.dest[idx] & sinfo.mask[idx]
|
||||
themMasked[idx] = them[idx] & sinfo.mask[idx]
|
||||
}
|
||||
if themMasked != destMasked {
|
||||
return false
|
||||
}
|
||||
finishSearch := func(sess *sessionInfo, err error) {
|
||||
if sess != nil {
|
||||
// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
|
||||
sess.Act(sinfo.searches.router, func() { sess.coords = res.Coords })
|
||||
sess.ping(sinfo.searches.router)
|
||||
}
|
||||
if err != nil {
|
||||
sinfo.callback(nil, err)
|
||||
} else {
|
||||
sinfo.callback(sess, nil)
|
||||
}
|
||||
// Cleanup
|
||||
if _, isIn := sinfo.searches.searches[sinfo.dest]; isIn {
|
||||
sinfo.searches.router.core.log.Debugln("Finished search:", &sinfo.dest, sinfo.send, sinfo.recv)
|
||||
delete(sinfo.searches.searches, res.Dest)
|
||||
}
|
||||
}
|
||||
// They match, so create a session and send a session ping
|
||||
var err error
|
||||
sess, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key)
|
||||
if !isIn {
|
||||
// Don't already have a session
|
||||
sess = sinfo.searches.router.sessions.createSession(&res.Key)
|
||||
if sess == nil {
|
||||
err = errors.New("session not allowed")
|
||||
} else if _, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key); !isIn {
|
||||
panic("This should never happen")
|
||||
}
|
||||
} else {
|
||||
err = errors.New("session already exists")
|
||||
}
|
||||
finishSearch(sess, err)
|
||||
return true
|
||||
}
|
|
@@ -1,546 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// This is the session manager
|
||||
// It's responsible for keeping track of open sessions to other nodes
|
||||
// The session information consists of crypto keys and coords
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
// Duration that we keep track of old nonces per session, to allow some out-of-order packet delivery
|
||||
const nonceWindow = time.Second
|
||||
|
||||
// All the information we know about an active session.
|
||||
// This includes coords, permanent and ephemeral keys, handles and nonces, various sorts of timing information for timeout and maintenance, and some metadata for the admin API.
|
||||
type sessionInfo struct {
|
||||
phony.Inbox // Protects all of the below, use it any time you read/change the contents of a session
|
||||
sessions *sessions //
|
||||
theirAddr address.Address //
|
||||
theirSubnet address.Subnet //
|
||||
theirPermPub crypto.BoxPubKey //
|
||||
theirSesPub crypto.BoxPubKey //
|
||||
mySesPub crypto.BoxPubKey //
|
||||
mySesPriv crypto.BoxPrivKey //
|
||||
sharedPermKey crypto.BoxSharedKey // used for session pings
|
||||
sharedSesKey crypto.BoxSharedKey // derived from session keys
|
||||
theirHandle crypto.Handle //
|
||||
myHandle crypto.Handle //
|
||||
theirNonce crypto.BoxNonce //
|
||||
myNonce crypto.BoxNonce //
|
||||
theirMTU MTU //
|
||||
myMTU MTU //
|
||||
wasMTUFixed bool // Was the MTU fixed by a receive error?
|
||||
timeOpened time.Time // Time the session was opened
|
||||
time time.Time // Time we last received a packet
|
||||
mtuTime time.Time // time myMTU was last changed
|
||||
pingTime time.Time // time the first ping was sent since the last received packet
|
||||
coords []byte // coords of destination
|
||||
reset bool // reset if coords change
|
||||
tstamp int64 // ATOMIC - tstamp from their last session ping, replay attack mitigation
|
||||
bytesSent uint64 // Bytes of real traffic sent in this session
|
||||
bytesRecvd uint64 // Bytes of real traffic received in this session
|
||||
init chan struct{} // Closed when the first session pong arrives, used to signal that the session is ready for initial use
|
||||
cancel util.Cancellation // Used to terminate workers
|
||||
conn *Conn // The associated Conn object
|
||||
callbacks []chan func() // Finished work from crypto workers
|
||||
}
|
||||
|
||||
// Represents a session ping/pong packet, and includes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.
|
||||
type sessionPing struct {
|
||||
SendPermPub crypto.BoxPubKey // Sender's permanent key
|
||||
Handle crypto.Handle // Random number to ID session
|
||||
SendSesPub crypto.BoxPubKey // Session key to use
|
||||
Coords []byte //
|
||||
Tstamp int64 // unix time, but the only real requirement is that it increases
|
||||
IsPong bool //
|
||||
MTU MTU //
|
||||
}
|
||||
|
||||
// Updates session info in response to a ping, after checking that the ping is OK.
|
||||
// Returns true if the session was updated, or false otherwise.
|
||||
func (s *sessionInfo) _update(p *sessionPing) bool {
|
||||
if !(p.Tstamp > s.tstamp) {
|
||||
// To protect against replay attacks
|
||||
return false
|
||||
}
|
||||
if p.SendPermPub != s.theirPermPub {
|
||||
// Should only happen if two sessions got the same handle
|
||||
// That shouldn't be allowed anyway, but if it happens then let one time out
|
||||
return false
|
||||
}
|
||||
if p.SendSesPub != s.theirSesPub {
|
||||
s.theirSesPub = p.SendSesPub
|
||||
s.theirHandle = p.Handle
|
||||
s.sharedSesKey = *crypto.GetSharedKey(&s.mySesPriv, &s.theirSesPub)
|
||||
s.theirNonce = crypto.BoxNonce{}
|
||||
}
|
||||
if p.MTU >= 1280 || p.MTU == 0 {
|
||||
s.theirMTU = p.MTU
|
||||
if s.conn != nil {
|
||||
s.conn.setMTU(s, s._getMTU())
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(s.coords, p.Coords) {
|
||||
// allocate enough space for additional coords
|
||||
s.coords = append(make([]byte, 0, len(p.Coords)+11), p.Coords...)
|
||||
}
|
||||
s.time = time.Now()
|
||||
s.tstamp = p.Tstamp
|
||||
s.reset = false
|
||||
defer func() { recover() }() // Recover if the below panics
|
||||
select {
|
||||
case <-s.init:
|
||||
default:
|
||||
// Unblock anything waiting for the session to initialize
|
||||
close(s.init)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Struct of all active sessions.
|
||||
// Sessions are indexed by handle.
|
||||
// Additionally, stores maps of address/subnet onto keys, and keys onto handles.
|
||||
type sessions struct {
|
||||
router *router
|
||||
listener *Listener
|
||||
listenerMutex sync.Mutex
|
||||
lastCleanup time.Time
|
||||
isAllowedHandler func(pubkey *crypto.BoxPubKey, initiator bool) bool // Returns whether session setup is allowed
|
||||
isAllowedMutex sync.RWMutex // Protects the above
|
||||
myMaximumMTU MTU // Maximum allowed session MTU
|
||||
permShared map[crypto.BoxPubKey]*crypto.BoxSharedKey // Maps known permanent keys to their shared key, used by DHT a lot
|
||||
sinfos map[crypto.Handle]*sessionInfo // Maps handle onto session info
|
||||
byTheirPerm map[crypto.BoxPubKey]*crypto.Handle // Maps theirPermPub onto handle
|
||||
}
|
||||
|
||||
// Initializes the session struct.
|
||||
func (ss *sessions) init(r *router) {
|
||||
ss.router = r
|
||||
ss.permShared = make(map[crypto.BoxPubKey]*crypto.BoxSharedKey)
|
||||
ss.sinfos = make(map[crypto.Handle]*sessionInfo)
|
||||
ss.byTheirPerm = make(map[crypto.BoxPubKey]*crypto.Handle)
|
||||
ss.lastCleanup = time.Now()
|
||||
ss.myMaximumMTU = 65535
|
||||
}
|
||||
|
||||
func (ss *sessions) reconfigure() {
|
||||
ss.router.Act(nil, func() {
|
||||
for _, session := range ss.sinfos {
|
||||
sinfo, mtu := session, ss.myMaximumMTU
|
||||
sinfo.Act(ss.router, func() {
|
||||
sinfo.myMTU = mtu
|
||||
})
|
||||
session.ping(ss.router)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Determines whether the session with a given public key is allowed based on
|
||||
// session firewall rules.
|
||||
func (ss *sessions) isSessionAllowed(pubkey *crypto.BoxPubKey, initiator bool) bool {
|
||||
ss.isAllowedMutex.RLock()
|
||||
defer ss.isAllowedMutex.RUnlock()
|
||||
|
||||
if ss.isAllowedHandler == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return ss.isAllowedHandler(pubkey, initiator)
|
||||
}
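// For exposition only: a hypothetical session firewall could be installed by
// setting isAllowedHandler under isAllowedMutex, e.g. to only allow sessions
// that this node initiates:
//
//	ss.isAllowedMutex.Lock()
//	ss.isAllowedHandler = func(pubkey *crypto.BoxPubKey, initiator bool) bool {
//		return initiator
//	}
//	ss.isAllowedMutex.Unlock()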
|
||||
|
||||
// Gets the session corresponding to a given handle.
|
||||
func (ss *sessions) getSessionForHandle(handle *crypto.Handle) (*sessionInfo, bool) {
|
||||
sinfo, isIn := ss.sinfos[*handle]
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
// Gets a session corresponding to a permanent key used by the remote node.
|
||||
func (ss *sessions) getByTheirPerm(key *crypto.BoxPubKey) (*sessionInfo, bool) {
|
||||
h, isIn := ss.byTheirPerm[*key]
|
||||
if !isIn {
|
||||
return nil, false
|
||||
}
|
||||
sinfo, isIn := ss.getSessionForHandle(h)
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
// Creates a new session and lazily cleans up old existing sessions. This
|
||||
// includes initializing session info to sane defaults (e.g. lowest supported
|
||||
// MTU).
|
||||
func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
|
||||
// TODO: this check definitely needs to be moved
|
||||
if !ss.isSessionAllowed(theirPermKey, true) {
|
||||
return nil
|
||||
}
|
||||
sinfo := sessionInfo{}
|
||||
sinfo.sessions = ss
|
||||
sinfo.theirPermPub = *theirPermKey
|
||||
sinfo.sharedPermKey = *ss.getSharedKey(&ss.router.core.boxPriv, &sinfo.theirPermPub)
|
||||
pub, priv := crypto.NewBoxKeys()
|
||||
sinfo.mySesPub = *pub
|
||||
sinfo.mySesPriv = *priv
|
||||
sinfo.myNonce = *crypto.NewBoxNonce()
|
||||
sinfo.theirMTU = 1280
|
||||
sinfo.myMTU = ss.myMaximumMTU
|
||||
now := time.Now()
|
||||
sinfo.timeOpened = now
|
||||
sinfo.time = now
|
||||
sinfo.mtuTime = now
|
||||
sinfo.pingTime = now
|
||||
sinfo.init = make(chan struct{})
|
||||
sinfo.cancel = util.NewCancellation()
|
||||
higher := false
|
||||
for idx := range ss.router.core.boxPub {
|
||||
if ss.router.core.boxPub[idx] > sinfo.theirPermPub[idx] {
|
||||
higher = true
|
||||
break
|
||||
} else if ss.router.core.boxPub[idx] < sinfo.theirPermPub[idx] {
|
||||
break
|
||||
}
|
||||
}
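// Both directions of a session encrypt with the same shared session key, so the
// parity split below presumably ensures the two endpoints never send with the
// same nonce value against that key.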
|
||||
if higher {
|
||||
// higher => odd nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
|
||||
} else {
|
||||
// lower => even nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
|
||||
}
|
||||
sinfo.myHandle = *crypto.NewHandle()
|
||||
sinfo.theirAddr = *address.AddrForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
|
||||
sinfo.theirSubnet = *address.SubnetForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
|
||||
ss.sinfos[sinfo.myHandle] = &sinfo
|
||||
ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
|
||||
return &sinfo
|
||||
}
|
||||
|
||||
func (ss *sessions) cleanup() {
|
||||
// Time thresholds almost certainly could use some adjusting
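// The maps are rebuilt below rather than cleaned in place, presumably because Go
// maps never shrink; copying the live entries into fresh maps lets the old, larger
// backing storage be garbage collected.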
|
||||
for k := range ss.permShared {
|
||||
// Delete a key, to make sure this eventually shrinks to 0
|
||||
delete(ss.permShared, k)
|
||||
break
|
||||
}
|
||||
if time.Since(ss.lastCleanup) < time.Minute {
|
||||
return
|
||||
}
|
||||
permShared := make(map[crypto.BoxPubKey]*crypto.BoxSharedKey, len(ss.permShared))
|
||||
for k, v := range ss.permShared {
|
||||
permShared[k] = v
|
||||
}
|
||||
ss.permShared = permShared
|
||||
sinfos := make(map[crypto.Handle]*sessionInfo, len(ss.sinfos))
|
||||
for k, v := range ss.sinfos {
|
||||
sinfos[k] = v
|
||||
}
|
||||
ss.sinfos = sinfos
|
||||
byTheirPerm := make(map[crypto.BoxPubKey]*crypto.Handle, len(ss.byTheirPerm))
|
||||
for k, v := range ss.byTheirPerm {
|
||||
byTheirPerm[k] = v
|
||||
}
|
||||
ss.byTheirPerm = byTheirPerm
|
||||
ss.lastCleanup = time.Now()
|
||||
}
|
||||
|
||||
func (sinfo *sessionInfo) doRemove() {
|
||||
sinfo.sessions.router.Act(nil, func() {
|
||||
sinfo.sessions.removeSession(sinfo)
|
||||
})
|
||||
}
|
||||
|
||||
// Closes a session, removing it from sessions maps.
|
||||
func (ss *sessions) removeSession(sinfo *sessionInfo) {
|
||||
if s := sinfo.sessions.sinfos[sinfo.myHandle]; s == sinfo {
|
||||
delete(sinfo.sessions.sinfos, sinfo.myHandle)
|
||||
delete(sinfo.sessions.byTheirPerm, sinfo.theirPermPub)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a session ping appropriate for the given session info.
|
||||
func (sinfo *sessionInfo) _getPing() sessionPing {
|
||||
loc := sinfo.sessions.router.core.switchTable.getLocator()
|
||||
coords := loc.getCoords()
|
||||
ping := sessionPing{
|
||||
SendPermPub: sinfo.sessions.router.core.boxPub,
|
||||
Handle: sinfo.myHandle,
|
||||
SendSesPub: sinfo.mySesPub,
|
||||
Tstamp: time.Now().Unix(),
|
||||
Coords: coords,
|
||||
MTU: sinfo.myMTU,
|
||||
}
|
||||
sinfo.myNonce.Increment()
|
||||
return ping
|
||||
}
|
||||
|
||||
// Gets the shared key for a pair of box keys.
|
||||
// Used to cache recently used shared keys for protocol traffic.
|
||||
// This comes up with dht req/res and session ping/pong traffic.
|
||||
func (ss *sessions) getSharedKey(myPriv *crypto.BoxPrivKey,
|
||||
theirPub *crypto.BoxPubKey) *crypto.BoxSharedKey {
|
||||
return crypto.GetSharedKey(myPriv, theirPub)
|
||||
// FIXME concurrency issues with the below, so for now we just burn the CPU every time
|
||||
if skey, isIn := ss.permShared[*theirPub]; isIn {
|
||||
return skey
|
||||
}
|
||||
// First do some cleanup
|
||||
const maxKeys = 1024
|
||||
for key := range ss.permShared {
|
||||
// Remove a random key until the store is small enough
|
||||
if len(ss.permShared) < maxKeys {
|
||||
break
|
||||
}
|
||||
delete(ss.permShared, key)
|
||||
}
|
||||
ss.permShared[*theirPub] = crypto.GetSharedKey(myPriv, theirPub)
|
||||
return ss.permShared[*theirPub]
|
||||
}
|
||||
|
||||
// Sends a session ping by calling _sendPingPong in ping mode.
|
||||
func (sinfo *sessionInfo) ping(from phony.Actor) {
|
||||
sinfo.Act(from, func() {
|
||||
sinfo._sendPingPong(false)
|
||||
})
|
||||
}
|
||||
|
||||
// Calls _getPing, sets the appropriate ping/pong flag, encodes to wire format, and sends it.
|
||||
// Updates the time the last ping was sent in the session info.
|
||||
func (sinfo *sessionInfo) _sendPingPong(isPong bool) {
|
||||
ping := sinfo._getPing()
|
||||
ping.IsPong = isPong
|
||||
bs := ping.encode()
|
||||
payload, nonce := crypto.BoxSeal(&sinfo.sharedPermKey, bs, nil)
|
||||
p := wire_protoTrafficPacket{
|
||||
Coords: sinfo.coords,
|
||||
ToKey: sinfo.theirPermPub,
|
||||
FromKey: sinfo.sessions.router.core.boxPub,
|
||||
Nonce: *nonce,
|
||||
Payload: payload,
|
||||
}
|
||||
packet := p.encode()
|
||||
// TODO rewrite the below if/when the peer struct becomes an actor, to not go through the router first
|
||||
sinfo.sessions.router.Act(sinfo, func() { sinfo.sessions.router.out(packet) })
|
||||
if sinfo.pingTime.Before(sinfo.time) {
|
||||
sinfo.pingTime = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (sinfo *sessionInfo) setConn(from phony.Actor, conn *Conn) {
|
||||
sinfo.Act(from, func() {
|
||||
sinfo.conn = conn
|
||||
sinfo.conn.setMTU(sinfo, sinfo._getMTU())
|
||||
})
|
||||
}
|
||||
|
||||
// Handles a session ping, creating a session if needed and calling update, then possibly responding with a pong if the ping was in ping mode and the update was successful.
|
||||
// If the session has a packet cached (common when first setting up a session), it will be sent.
|
||||
func (ss *sessions) handlePing(ping *sessionPing) {
|
||||
// Get the corresponding session (or create a new session)
|
||||
sinfo, isIn := ss.getByTheirPerm(&ping.SendPermPub)
|
||||
switch {
|
||||
case ping.IsPong: // This is a response, not an initial ping, so ignore it.
|
||||
case isIn: // Session already exists
|
||||
case !ss.isSessionAllowed(&ping.SendPermPub, false): // Session is not allowed
|
||||
default:
|
||||
ss.listenerMutex.Lock()
|
||||
if ss.listener != nil {
|
||||
// This is a ping from an allowed node for which no session exists, and we have a listener ready to handle sessions.
|
||||
// We need to create a session and pass it to the listener.
|
||||
sinfo = ss.createSession(&ping.SendPermPub)
|
||||
if s, _ := ss.getByTheirPerm(&ping.SendPermPub); s != sinfo {
|
||||
panic("This should not happen")
|
||||
}
|
||||
conn := newConn(ss.router.core, crypto.GetNodeID(&sinfo.theirPermPub), &crypto.NodeID{}, sinfo)
|
||||
for i := range conn.nodeMask {
|
||||
conn.nodeMask[i] = 0xFF
|
||||
}
|
||||
sinfo.setConn(ss.router, conn)
|
||||
c := ss.listener.conn
|
||||
go func() { c <- conn }()
|
||||
}
|
||||
ss.listenerMutex.Unlock()
|
||||
}
|
||||
if sinfo != nil {
|
||||
sinfo.Act(ss.router, func() {
|
||||
// Update the session
|
||||
if !sinfo._update(ping) { /*panic("Should not happen in testing")*/
|
||||
return
|
||||
}
|
||||
if !ping.IsPong {
|
||||
sinfo._sendPingPong(true)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Get the MTU of the session.
|
||||
// Will be equal to the smaller of this node's MTU or the remote node's MTU.
|
||||
// If sending over links with a maximum message size (this was a thing with the old UDP code), it could be further lowered, to a minimum of 1280.
|
||||
func (sinfo *sessionInfo) _getMTU() MTU {
|
||||
if sinfo.theirMTU == 0 || sinfo.myMTU == 0 {
|
||||
return 0
|
||||
}
|
||||
if sinfo.theirMTU < sinfo.myMTU {
|
||||
return sinfo.theirMTU
|
||||
}
|
||||
return sinfo.myMTU
|
||||
}
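// For example, with the defaults set in createSession (myMTU = 65535 via
// myMaximumMTU, theirMTU = 1280), the session MTU starts at 1280 until a ping
// from the remote side updates theirMTU.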
|
||||
|
||||
// Checks if a packet's nonce is newer than the newest nonce seen, or otherwise recent enough to fall within the allowed out-of-order window.
|
||||
func (sinfo *sessionInfo) _nonceIsOK(theirNonce *crypto.BoxNonce) bool {
|
||||
// Accept anything newer than the newest nonce we've seen; within nonceWindow of the last accepted packet, also accept older nonces to allow some out-of-order delivery
|
||||
if theirNonce.Minus(&sinfo.theirNonce) > 0 {
|
||||
// This is newer than the newest nonce we've seen
|
||||
return true
|
||||
}
|
||||
return time.Since(sinfo.time) < nonceWindow
|
||||
}
|
||||
|
||||
// Updates the most recent nonce and the last-received time, if this nonce is newer than anything seen so far
|
||||
func (sinfo *sessionInfo) _updateNonce(theirNonce *crypto.BoxNonce) {
|
||||
if theirNonce.Minus(&sinfo.theirNonce) > 0 {
|
||||
// This nonce is the newest we've seen, so make a note of that
|
||||
sinfo.theirNonce = *theirNonce
|
||||
sinfo.time = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
// Resets all sessions to an uninitialized state.
|
||||
// Called after coord changes, so attempts to use a session will trigger a new ping and notify the remote end of the coord change.
|
||||
// Only call this from the router actor.
|
||||
func (ss *sessions) reset() {
|
||||
for _, _sinfo := range ss.sinfos {
|
||||
sinfo := _sinfo // So we can safely put it in a closure
|
||||
sinfo.Act(ss.router, func() {
|
||||
sinfo.reset = true
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////// Worker Functions Below ////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type sessionCryptoManager struct {
|
||||
phony.Inbox
|
||||
}
|
||||
|
||||
func (m *sessionCryptoManager) workerGo(from phony.Actor, f func()) {
|
||||
m.Act(from, func() {
|
||||
util.WorkerGo(f)
|
||||
})
|
||||
}
|
||||
|
||||
var manager = sessionCryptoManager{}
|
||||
|
||||
type FlowKeyMessage struct {
|
||||
FlowKey uint64
|
||||
Message []byte
|
||||
}
|
||||
|
||||
func (sinfo *sessionInfo) recv(from phony.Actor, packet *wire_trafficPacket) {
|
||||
sinfo.Act(from, func() {
|
||||
sinfo._recvPacket(packet)
|
||||
})
|
||||
}
|
||||
|
||||
func (sinfo *sessionInfo) _recvPacket(p *wire_trafficPacket) {
|
||||
select {
|
||||
case <-sinfo.init:
|
||||
default:
|
||||
// TODO find a better way to drop things until initialized
|
||||
util.PutBytes(p.Payload)
|
||||
return
|
||||
}
|
||||
if !sinfo._nonceIsOK(&p.Nonce) {
|
||||
util.PutBytes(p.Payload)
|
||||
return
|
||||
}
|
||||
k := sinfo.sharedSesKey
|
||||
var isOK bool
|
||||
var bs []byte
|
||||
ch := make(chan func(), 1)
|
||||
poolFunc := func() {
|
||||
bs, isOK = crypto.BoxOpen(&k, p.Payload, &p.Nonce)
|
||||
callback := func() {
|
||||
util.PutBytes(p.Payload)
|
||||
if !isOK || k != sinfo.sharedSesKey || !sinfo._nonceIsOK(&p.Nonce) {
|
||||
// Either we failed to decrypt, or the session was updated, or we
|
||||
// received something newer in the meantime, so this packet is no longer acceptable
|
||||
util.PutBytes(bs)
|
||||
return
|
||||
}
|
||||
sinfo._updateNonce(&p.Nonce)
|
||||
sinfo.bytesRecvd += uint64(len(bs))
|
||||
sinfo.conn.recvMsg(sinfo, bs)
|
||||
}
|
||||
ch <- callback
|
||||
sinfo.checkCallbacks()
|
||||
}
|
||||
sinfo.callbacks = append(sinfo.callbacks, ch)
|
||||
manager.workerGo(sinfo, poolFunc)
|
||||
}
|
||||
|
||||
func (sinfo *sessionInfo) _send(msg FlowKeyMessage) {
|
||||
select {
|
||||
case <-sinfo.init:
|
||||
default:
|
||||
// TODO find a better way to drop things until initialized
|
||||
util.PutBytes(msg.Message)
|
||||
return
|
||||
}
|
||||
sinfo.bytesSent += uint64(len(msg.Message))
|
||||
coords := append([]byte(nil), sinfo.coords...)
|
||||
if msg.FlowKey != 0 {
|
||||
coords = append(coords, 0)
|
||||
coords = append(coords, wire_encode_uint64(msg.FlowKey)...)
|
||||
}
|
||||
p := wire_trafficPacket{
|
||||
Coords: coords,
|
||||
Handle: sinfo.theirHandle,
|
||||
Nonce: sinfo.myNonce,
|
||||
}
|
||||
sinfo.myNonce.Increment()
|
||||
k := sinfo.sharedSesKey
|
||||
ch := make(chan func(), 1)
|
||||
poolFunc := func() {
|
||||
p.Payload, _ = crypto.BoxSeal(&k, msg.Message, &p.Nonce)
|
||||
callback := func() {
|
||||
// Encoding may block on a util.GetBytes(), so kept out of the worker pool
|
||||
packet := p.encode()
|
||||
// Cleanup
|
||||
util.PutBytes(msg.Message)
|
||||
util.PutBytes(p.Payload)
|
||||
// Send the packet
|
||||
// TODO replace this with a send to the peer struct if that becomes an actor
|
||||
sinfo.sessions.router.Act(sinfo, func() {
|
||||
sinfo.sessions.router.out(packet)
|
||||
})
|
||||
}
|
||||
ch <- callback
|
||||
sinfo.checkCallbacks()
|
||||
}
|
||||
sinfo.callbacks = append(sinfo.callbacks, ch)
|
||||
manager.workerGo(sinfo, poolFunc)
|
||||
}
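// callbacks acts as a FIFO queue of single-use channels: crypto runs concurrently
// in the worker pool, but checkCallbacks below only ever drains the head of the
// queue, so results are delivered in the order the packets were submitted for
// this session.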
|
||||
|
||||
func (sinfo *sessionInfo) checkCallbacks() {
|
||||
sinfo.Act(nil, func() {
|
||||
if len(sinfo.callbacks) > 0 {
|
||||
select {
|
||||
case callback := <-sinfo.callbacks[0]:
|
||||
sinfo.callbacks = sinfo.callbacks[1:]
|
||||
callback()
|
||||
sinfo.checkCallbacks()
|
||||
default:
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
|
@@ -1,118 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
)
|
||||
|
||||
// Test that this matches the interface we expect
|
||||
var _ = linkInterfaceMsgIO(&stream{})
|
||||
|
||||
type stream struct {
|
||||
rwc io.ReadWriteCloser
|
||||
inputBuffer *bufio.Reader
|
||||
outputBuffer net.Buffers
|
||||
}
|
||||
|
||||
func (s *stream) close() error {
|
||||
return s.rwc.Close()
|
||||
}
|
||||
|
||||
const streamMsgSize = 2048 + 65535
|
||||
|
||||
var streamMsg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"
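// For exposition only: the framing used by writeMsgs and readMsgFromBuffer below is
//
//	[4 bytes 0xde 0xad 0xb1 0x75][varint-encoded payload length][payload]
//
// so, assuming wire_encode_uint64 encodes 3 as the single byte 0x03, a 3-byte
// payload "abc" goes on the wire as: de ad b1 75 03 61 62 63.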
|
||||
|
||||
func (s *stream) init(rwc io.ReadWriteCloser) {
|
||||
// TODO have this also do the metadata handshake and create the peer struct
|
||||
s.rwc = rwc
|
||||
// TODO call something to do the metadata exchange
|
||||
s.inputBuffer = bufio.NewReaderSize(s.rwc, 2*streamMsgSize)
|
||||
}
|
||||
|
||||
// writeMsgs writes messages with stream padding, and is *not* thread safe.
|
||||
func (s *stream) writeMsgs(bss [][]byte) (int, error) {
|
||||
buf := s.outputBuffer[:0]
|
||||
var written int
|
||||
for _, bs := range bss {
|
||||
buf = append(buf, streamMsg[:])
|
||||
buf = append(buf, wire_encode_uint64(uint64(len(bs))))
|
||||
buf = append(buf, bs)
|
||||
written += len(bs)
|
||||
}
|
||||
s.outputBuffer = buf[:0] // So we can reuse the same underlying array later
|
||||
_, err := buf.WriteTo(s.rwc)
|
||||
// TODO only include number of bytes from bs *successfully* written?
|
||||
return written, err
|
||||
}
|
||||
|
||||
// readMsg reads a message from the stream, accounting for stream padding, and is *not* thread safe.
|
||||
func (s *stream) readMsg() ([]byte, error) {
|
||||
for {
|
||||
bs, err := s.readMsgFromBuffer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("message error: %v", err)
|
||||
}
|
||||
return bs, err
|
||||
}
|
||||
}
|
||||
|
||||
// Writes metadata bytes without stream padding, meant to be temporary
|
||||
func (s *stream) _sendMetaBytes(metaBytes []byte) error {
|
||||
var written int
|
||||
for written < len(metaBytes) {
|
||||
n, err := s.rwc.Write(metaBytes)
|
||||
written += n
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reads metadata bytes without stream padding, meant to be temporary
|
||||
func (s *stream) _recvMetaBytes() ([]byte, error) {
|
||||
var meta version_metadata
|
||||
frag := meta.encode()
|
||||
metaBytes := make([]byte, 0, len(frag))
|
||||
for len(metaBytes) < len(frag) {
|
||||
n, err := s.rwc.Read(frag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metaBytes = append(metaBytes, frag[:n]...)
|
||||
}
|
||||
return metaBytes, nil
|
||||
}
|
||||
|
||||
// Reads bytes from the underlying rwc and returns 1 full message
|
||||
func (s *stream) readMsgFromBuffer() ([]byte, error) {
|
||||
pad := streamMsg // Copy
|
||||
_, err := io.ReadFull(s.inputBuffer, pad[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if pad != streamMsg {
|
||||
return nil, errors.New("bad message")
|
||||
}
|
||||
lenSlice := make([]byte, 0, 10)
|
||||
// FIXME this nextByte stuff depends on wire.go format, kind of ugly to have it here
|
||||
nextByte := byte(0xff)
|
||||
for nextByte > 127 {
|
||||
nextByte, err = s.inputBuffer.ReadByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lenSlice = append(lenSlice, nextByte)
|
||||
}
|
||||
msgLen, _ := wire_decode_uint64(lenSlice)
|
||||
if msgLen > streamMsgSize {
|
||||
return nil, errors.New("oversized message")
|
||||
}
|
||||
msg := util.ResizeBytes(util.GetBytes(), int(msgLen))
|
||||
_, err = io.ReadFull(s.inputBuffer, msg)
|
||||
return msg, err
|
||||
}
|
|
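For reference, a rough sketch of what one framed message looks like on the wire as described above: the four "dead bits" magic bytes, then the wire-encoded length, then the payload. frameShortMsg is a hypothetical helper, not part of this package, and only handles payloads under 128 bytes so the length fits in a single varint byte:

package main

import (
	"bytes"
	"fmt"
)

// frameShortMsg frames a short message: stream padding, length prefix, payload.
func frameShortMsg(msg []byte) []byte {
	if len(msg) >= 128 {
		panic("sketch only handles short messages")
	}
	var buf bytes.Buffer
	buf.Write([]byte{0xde, 0xad, 0xb1, 0x75}) // stream padding / magic
	buf.WriteByte(byte(len(msg)))             // single-byte length prefix
	buf.Write(msg)
	return buf.Bytes()
}

func main() {
	fmt.Printf("% x\n", frameShortMsg([]byte("hello")))
}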
@ -1,890 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// This part constructs a spanning tree of the network
|
||||
// It routes packets based on distance on the spanning tree
|
||||
// In general, this is *not* equivalent to routing on the tree
|
||||
// It falls back to the tree in the worst case, but it can take shortcuts too
|
||||
// This is the part that makes routing reasonably efficient on scale-free graphs
|
||||
|
||||
// TODO document/comment everything in a lot more detail
|
||||
|
||||
// TODO? use a pre-computed lookup table (python version had this)
|
||||
// A little annoying to do with constant changes from backpressure
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
|
||||
"github.com/Arceliar/phony"
|
||||
)
|
||||
|
||||
const (
|
||||
switch_timeout = time.Minute
|
||||
switch_updateInterval = switch_timeout / 2
|
||||
switch_throttle = switch_updateInterval / 2
|
||||
switch_faster_threshold = 240 // Number of switch updates before switching to a faster parent
|
||||
)
|
||||
|
||||
// The switch locator represents the topology and network state dependent info about a node, minus the signatures that go with it.
|
||||
// Nodes will pick the best root they see, provided that the root continues to push out updates with new timestamps.
|
||||
// The coords represent a path from the root to a node.
|
||||
// This path is generally part of a spanning tree, except possibly the last hop (it can loop when sending coords to your parent, but they see this and know not to use a looping path).
|
||||
type switchLocator struct {
|
||||
root crypto.SigPubKey
|
||||
tstamp int64
|
||||
coords []switchPort
|
||||
}
|
||||
|
||||
// Returns true if the first sigPubKey has a higher TreeID.
|
||||
func firstIsBetter(first, second *crypto.SigPubKey) bool {
|
||||
// Higher TreeID is better
|
||||
ftid := crypto.GetTreeID(first)
|
||||
stid := crypto.GetTreeID(second)
|
||||
for idx := 0; idx < len(ftid); idx++ {
|
||||
if ftid[idx] == stid[idx] {
|
||||
continue
|
||||
}
|
||||
return ftid[idx] > stid[idx]
|
||||
}
|
||||
// Edge case, when comparing identical IDs
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns a copy of the locator which can safely be mutated.
|
||||
func (l *switchLocator) clone() switchLocator {
|
||||
// Used to create a deep copy for use in messages
|
||||
// Copy required because we need to mutate coords before sending
|
||||
// (By appending the port from us to the destination)
|
||||
loc := *l
|
||||
loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
|
||||
copy(loc.coords, l.coords)
|
||||
return loc
|
||||
}
|
||||
|
||||
// Gets the distance a locator is from the provided destination coords, with the coords provided in []byte format (used to compress integers sent over the wire).
|
||||
func (l *switchLocator) dist(dest []byte) int {
|
||||
// Returns distance (on the tree) from these coords
|
||||
offset := 0
|
||||
fdc := 0
|
||||
for {
|
||||
if fdc >= len(l.coords) {
|
||||
break
|
||||
}
|
||||
coord, length := wire_decode_uint64(dest[offset:])
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
if l.coords[fdc] != switchPort(coord) {
|
||||
break
|
||||
}
|
||||
fdc++
|
||||
offset += length
|
||||
}
|
||||
dist := len(l.coords[fdc:])
|
||||
for {
|
||||
_, length := wire_decode_uint64(dest[offset:])
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
dist++
|
||||
offset += length
|
||||
}
|
||||
return dist
|
||||
}
|
||||
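To make the distance calculation above concrete, here is a simplified restatement on plain uint64 coords, ignoring the wire encoding; the names are illustrative and this is only a sketch of the same rule:

package main

import "fmt"

// treeDist strips the prefix shared with the destination, then the distance
// is the number of remaining hops on both sides of the tree.
func treeDist(self, dest []uint64) int {
	common := 0
	for common < len(self) && common < len(dest) && self[common] == dest[common] {
		common++
	}
	return (len(self) - common) + (len(dest) - common)
}

func main() {
	self := []uint64{1, 2, 3}
	dest := []uint64{1, 2, 5, 7}
	fmt.Println(treeDist(self, dest)) // 3: one hop up, two hops down
}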
|
||||
// Gets coords in wire encoded format, with *no* length prefix.
|
||||
func (l *switchLocator) getCoords() []byte {
|
||||
bs := make([]byte, 0, len(l.coords))
|
||||
for _, coord := range l.coords {
|
||||
c := wire_encode_uint64(uint64(coord))
|
||||
bs = append(bs, c...)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
// Returns true if this locator represents an ancestor of the locator given as an argument.
|
||||
// Ancestor means that it's the parent node, or the parent's parent, and so on...
|
||||
func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
|
||||
if x.root != y.root {
|
||||
return false
|
||||
}
|
||||
if len(x.coords) > len(y.coords) {
|
||||
return false
|
||||
}
|
||||
for idx := range x.coords {
|
||||
if x.coords[idx] != y.coords[idx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Information about a peer, used by the switch to build the tree and eventually make routing decisions.
|
||||
type peerInfo struct {
|
||||
key crypto.SigPubKey // ID of this peer
|
||||
locator switchLocator // Should be able to respond with signatures upon request
|
||||
degree uint64 // Self-reported degree
|
||||
time time.Time // Time this node was last seen
|
||||
faster map[switchPort]uint64 // Counter of how often a node is faster than the current parent, penalized extra if slower
|
||||
port switchPort // Interface number of this peer
|
||||
msg switchMsg // The wire switchMsg used
|
||||
blocked bool // True if the link is blocked, used to avoid parenting a blocked link
|
||||
}
|
||||
|
||||
// This is just a uint64 with a named type for clarity reasons.
|
||||
type switchPort uint64
|
||||
|
||||
// This is the subset of the information about a peer needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
|
||||
type tableElem struct {
|
||||
port switchPort
|
||||
locator switchLocator
|
||||
time time.Time
|
||||
}
|
||||
|
||||
// This is the subset of the information about all peers needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
|
||||
type lookupTable struct {
|
||||
self switchLocator
|
||||
elems map[switchPort]tableElem
|
||||
}
|
||||
|
||||
// This is switch information which is mutable and needs to be modified by other goroutines, but is not accessed atomically.
|
||||
// Use the switchTable functions to access it safely using the RWMutex for synchronization.
|
||||
type switchData struct {
|
||||
// All data that's mutable and used by exported Table methods
|
||||
// To be read/written with atomic.Value Store/Load calls
|
||||
locator switchLocator
|
||||
seq uint64 // Sequence number, reported to peers, so they know about changes
|
||||
peers map[switchPort]peerInfo
|
||||
msg *switchMsg
|
||||
}
|
||||
|
||||
// All the information stored by the switch.
|
||||
type switchTable struct {
|
||||
core *Core
|
||||
key crypto.SigPubKey // Our own key
|
||||
time time.Time // Time when locator.tstamp was last updated
|
||||
drop map[crypto.SigPubKey]int64 // Tstamp associated with a dropped root
|
||||
mutex sync.RWMutex // Lock for reads/writes of switchData
|
||||
parent switchPort // Port of whatever peer is our parent, or self if we're root
|
||||
data switchData //
|
||||
updater atomic.Value // *sync.Once
|
||||
table atomic.Value // lookupTable
|
||||
phony.Inbox // Owns the below
|
||||
queues switch_buffers // Queues - not atomic so ONLY use through the actor
|
||||
idle map[switchPort]struct{} // idle peers - not atomic so ONLY use through the actor
|
||||
sending map[switchPort]struct{} // peers known to be blocked in a send (somehow)
|
||||
}
|
||||
|
||||
// Minimum allowed total size of switch queues.
|
||||
const SwitchQueueTotalMinSize = 4 * 1024 * 1024
|
||||
|
||||
// Initializes the switchTable struct.
|
||||
func (t *switchTable) init(core *Core) {
|
||||
now := time.Now()
|
||||
t.core = core
|
||||
t.key = t.core.sigPub
|
||||
locator := switchLocator{root: t.key, tstamp: now.Unix()}
|
||||
peers := make(map[switchPort]peerInfo)
|
||||
t.data = switchData{locator: locator, peers: peers}
|
||||
t.updater.Store(&sync.Once{})
|
||||
t.table.Store(lookupTable{})
|
||||
t.drop = make(map[crypto.SigPubKey]int64)
|
||||
phony.Block(t, func() {
|
||||
core.config.Mutex.RLock()
|
||||
if core.config.Current.SwitchOptions.MaxTotalQueueSize > SwitchQueueTotalMinSize {
|
||||
t.queues.totalMaxSize = core.config.Current.SwitchOptions.MaxTotalQueueSize
|
||||
} else {
|
||||
t.queues.totalMaxSize = SwitchQueueTotalMinSize
|
||||
}
|
||||
core.config.Mutex.RUnlock()
|
||||
t.queues.bufs = make(map[string]switch_buffer)
|
||||
t.idle = make(map[switchPort]struct{})
|
||||
t.sending = make(map[switchPort]struct{})
|
||||
})
|
||||
}
|
||||
|
||||
func (t *switchTable) reconfigure() {
|
||||
// This is where reconfiguration would go, if we had anything useful to do.
|
||||
t.core.link.reconfigure()
|
||||
t.core.peers.reconfigure()
|
||||
}
|
||||
|
||||
// Safely gets a copy of this node's locator.
|
||||
func (t *switchTable) getLocator() switchLocator {
|
||||
t.mutex.RLock()
|
||||
defer t.mutex.RUnlock()
|
||||
return t.data.locator.clone()
|
||||
}
|
||||
|
||||
// Regular maintenance to possibly timeout/reset the root and similar.
|
||||
func (t *switchTable) doMaintenance() {
|
||||
// Periodic maintenance work to keep things internally consistent
|
||||
t.mutex.Lock() // Write lock
|
||||
defer t.mutex.Unlock() // Release lock when we're done
|
||||
t.cleanRoot()
|
||||
t.cleanDropped()
|
||||
}
|
||||
|
||||
// Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
|
||||
func (t *switchTable) cleanRoot() {
|
||||
// TODO rethink how this is done?...
|
||||
// Get rid of the root if it looks like it's timed out
|
||||
now := time.Now()
|
||||
doUpdate := false
|
||||
if now.Sub(t.time) > switch_timeout {
|
||||
dropped := t.data.peers[t.parent]
|
||||
dropped.time = t.time
|
||||
t.drop[t.data.locator.root] = t.data.locator.tstamp
|
||||
doUpdate = true
|
||||
}
|
||||
// Or, if we're better than our root, root ourself
|
||||
if firstIsBetter(&t.key, &t.data.locator.root) {
|
||||
doUpdate = true
|
||||
}
|
||||
// Or, if we are the root, possibly update our timestamp
|
||||
if t.data.locator.root == t.key &&
|
||||
now.Sub(t.time) > switch_updateInterval {
|
||||
doUpdate = true
|
||||
}
|
||||
if doUpdate {
|
||||
t.parent = switchPort(0)
|
||||
t.time = now
|
||||
if t.data.locator.root != t.key {
|
||||
t.data.seq++
|
||||
t.updater.Store(&sync.Once{})
|
||||
t.core.router.reset(nil)
|
||||
}
|
||||
t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
|
||||
t.core.peers.sendSwitchMsgs(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Blocks and, if possible, unparents a peer
|
||||
func (t *switchTable) blockPeer(port switchPort) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
peer, isIn := t.data.peers[port]
|
||||
if !isIn {
|
||||
return
|
||||
}
|
||||
peer.blocked = true
|
||||
t.data.peers[port] = peer
|
||||
if port != t.parent {
|
||||
return
|
||||
}
|
||||
t.parent = 0
|
||||
for _, info := range t.data.peers {
|
||||
if info.port == port {
|
||||
continue
|
||||
}
|
||||
t.unlockedHandleMsg(&info.msg, info.port, true)
|
||||
}
|
||||
t.unlockedHandleMsg(&peer.msg, peer.port, true)
|
||||
}
|
||||
|
||||
// Removes a peer.
|
||||
// Must be called by the router actor with a lambda that calls this.
|
||||
// If the removed peer was this node's parent, it immediately tries to find a new parent.
|
||||
func (t *switchTable) forgetPeer(port switchPort) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
delete(t.data.peers, port)
|
||||
t.updater.Store(&sync.Once{})
|
||||
if port != t.parent {
|
||||
return
|
||||
}
|
||||
t.parent = 0
|
||||
for _, info := range t.data.peers {
|
||||
t.unlockedHandleMsg(&info.msg, info.port, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
|
||||
// If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
|
||||
// This function is called periodically to do that cleanup.
|
||||
func (t *switchTable) cleanDropped() {
|
||||
// TODO? only call this after root changes, not periodically
|
||||
for root := range t.drop {
|
||||
if !firstIsBetter(&root, &t.data.locator.root) {
|
||||
delete(t.drop, root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A switchMsg contains the root node's sig key, timestamp, and signed per-hop information about a path from the root node to some other node in the network.
|
||||
// This is exchanged with peers to construct the spanning tree.
|
||||
// A subset of this information, excluding the signatures, is used to construct locators that are used elsewhere in the code.
|
||||
type switchMsg struct {
|
||||
Root crypto.SigPubKey
|
||||
TStamp int64
|
||||
Hops []switchMsgHop
|
||||
}
|
||||
|
||||
// This represents the signed information about the path leading from the root to the Next node, via the Port specified here.
|
||||
type switchMsgHop struct {
|
||||
Port switchPort
|
||||
Next crypto.SigPubKey
|
||||
Sig crypto.SigBytes
|
||||
}
|
||||
|
||||
// This returns a pointer to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
|
||||
func (t *switchTable) getMsg() *switchMsg {
|
||||
t.mutex.RLock()
|
||||
defer t.mutex.RUnlock()
|
||||
if t.parent == 0 {
|
||||
return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
|
||||
} else if parent, isIn := t.data.peers[t.parent]; isIn {
|
||||
msg := parent.msg
|
||||
msg.Hops = append([]switchMsgHop(nil), msg.Hops...)
|
||||
return &msg
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// This function checks that the root information in a switchMsg is OK.
|
||||
// In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
|
||||
func (t *switchTable) checkRoot(msg *switchMsg) bool {
|
||||
// returns false if it's a dropped root, not a better root, or has an older timestamp
|
||||
// returns true otherwise
|
||||
// used elsewhere to keep inserting peers into the dht only if root info is OK
|
||||
t.mutex.RLock()
|
||||
defer t.mutex.RUnlock()
|
||||
dropTstamp, isIn := t.drop[msg.Root]
|
||||
switch {
|
||||
case isIn && dropTstamp >= msg.TStamp:
|
||||
return false
|
||||
case firstIsBetter(&msg.Root, &t.data.locator.root):
|
||||
return true
|
||||
case t.data.locator.root != msg.Root:
|
||||
return false
|
||||
case t.data.locator.tstamp > msg.TStamp:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// This is a mutexed wrapper to unlockedHandleMsg, and is called by the peer structs in peers.go to pass a switchMsg for that peer into the switch.
|
||||
func (t *switchTable) handleMsg(msg *switchMsg, fromPort switchPort) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
t.unlockedHandleMsg(msg, fromPort, false)
|
||||
}
|
||||
|
||||
// This updates the switch with information about a peer.
|
||||
// Then comes the tricky part: it decides whether it should update our own locator as a result.
|
||||
// That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
|
||||
// There are a lot of very delicate order-sensitive checks here, so it's best to just read the code if you need to understand what it's doing.
|
||||
// It's very important to not change the order of the statements in the case function unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
|
||||
// Set the third arg to true if you're reprocessing an old message, e.g. to find a new parent after one disconnects, to avoid updating some timing related things.
|
||||
func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
|
||||
// TODO directly use a switchMsg instead of switchMessage + sigs
|
||||
now := time.Now()
|
||||
// Set up the sender peerInfo
|
||||
var sender peerInfo
|
||||
sender.locator.root = msg.Root
|
||||
sender.locator.tstamp = msg.TStamp
|
||||
prevKey := msg.Root
|
||||
for _, hop := range msg.Hops {
|
||||
// Build locator
|
||||
sender.locator.coords = append(sender.locator.coords, hop.Port)
|
||||
sender.key = prevKey
|
||||
prevKey = hop.Next
|
||||
}
|
||||
sender.msg = *msg
|
||||
sender.port = fromPort
|
||||
sender.time = now
|
||||
// Decide what to do
|
||||
equiv := func(x *switchLocator, y *switchLocator) bool {
|
||||
if x.root != y.root {
|
||||
return false
|
||||
}
|
||||
if len(x.coords) != len(y.coords) {
|
||||
return false
|
||||
}
|
||||
for idx := range x.coords {
|
||||
if x.coords[idx] != y.coords[idx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
doUpdate := false
|
||||
oldSender := t.data.peers[fromPort]
|
||||
if !equiv(&sender.locator, &oldSender.locator) {
|
||||
// Reset faster info, we'll start refilling it right after this
|
||||
sender.faster = nil
|
||||
doUpdate = true
|
||||
}
|
||||
// Update the matrix of peer "faster" thresholds
|
||||
if reprocessing {
|
||||
sender.faster = oldSender.faster
|
||||
sender.time = oldSender.time
|
||||
sender.blocked = oldSender.blocked
|
||||
} else {
|
||||
sender.faster = make(map[switchPort]uint64, len(oldSender.faster))
|
||||
for port, peer := range t.data.peers {
|
||||
if port == fromPort {
|
||||
continue
|
||||
} else if sender.locator.root != peer.locator.root || sender.locator.tstamp > peer.locator.tstamp {
|
||||
// The sender was faster than this node, so increment, as long as we don't overflow because of it
|
||||
if oldSender.faster[peer.port] < switch_faster_threshold {
|
||||
sender.faster[port] = oldSender.faster[peer.port] + 1
|
||||
} else {
|
||||
sender.faster[port] = switch_faster_threshold
|
||||
}
|
||||
} else {
|
||||
// Slower than this node, penalize (more than the reward amount)
|
||||
if oldSender.faster[port] > 1 {
|
||||
sender.faster[port] = oldSender.faster[peer.port] - 2
|
||||
} else {
|
||||
sender.faster[port] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Update sender
|
||||
t.data.peers[fromPort] = sender
|
||||
// Decide if we should also update our root info to make the sender our parent
|
||||
updateRoot := false
|
||||
oldParent, isIn := t.data.peers[t.parent]
|
||||
noParent := !isIn
|
||||
noLoop := func() bool {
|
||||
for idx := 0; idx < len(msg.Hops)-1; idx++ {
|
||||
if msg.Hops[idx].Next == t.core.sigPub {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if sender.locator.root == t.core.sigPub {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}()
|
||||
dropTstamp, isIn := t.drop[sender.locator.root]
|
||||
// Decide if we need to update info about the root or change parents.
|
||||
switch {
|
||||
case !noLoop:
|
||||
// This route loops, so we can't use the sender as our parent.
|
||||
case isIn && dropTstamp >= sender.locator.tstamp:
|
||||
// This is a known root with a timestamp older than a known timeout, so we can't trust it to be a new announcement.
|
||||
case firstIsBetter(&sender.locator.root, &t.data.locator.root):
|
||||
// This is a better root than what we're currently using, so we should update.
|
||||
updateRoot = true
|
||||
case t.data.locator.root != sender.locator.root:
|
||||
// This is not the same root, and it's apparently not better (from the above), so we should ignore it.
|
||||
case t.data.locator.tstamp > sender.locator.tstamp:
|
||||
// This timestamp is older than the most recently seen one from this root, so we should ignore it.
|
||||
case noParent:
|
||||
// We currently have no working parent, and at this point in the switch statement, anything is better than nothing.
|
||||
updateRoot = true
|
||||
case sender.faster[t.parent] >= switch_faster_threshold:
|
||||
// The sender is reliably faster than the current parent.
|
||||
updateRoot = true
|
||||
case !sender.blocked && oldParent.blocked:
|
||||
// Replace a blocked parent
|
||||
updateRoot = true
|
||||
case reprocessing && sender.blocked && !oldParent.blocked:
|
||||
// Don't replace an unblocked parent when reprocessing
|
||||
case reprocessing && sender.faster[t.parent] > oldParent.faster[sender.port]:
|
||||
// The sender seems to be reliably faster than the current parent, so switch to them instead.
|
||||
updateRoot = true
|
||||
case sender.port != t.parent:
|
||||
// Ignore further cases if the sender isn't our parent.
|
||||
case !reprocessing && !equiv(&sender.locator, &t.data.locator):
|
||||
// Special case:
|
||||
// If coords changed, then we need to penalize this node somehow, to prevent flapping.
|
||||
// First, reset all faster-related info to 0.
|
||||
// Then, de-parent the node and reprocess all messages to find a new parent.
|
||||
t.parent = 0
|
||||
for _, peer := range t.data.peers {
|
||||
if peer.port == sender.port {
|
||||
continue
|
||||
}
|
||||
t.unlockedHandleMsg(&peer.msg, peer.port, true)
|
||||
}
|
||||
// Process the sender last, to avoid keeping them as a parent if at all possible.
|
||||
t.unlockedHandleMsg(&sender.msg, sender.port, true)
|
||||
case now.Sub(t.time) < switch_throttle:
|
||||
// We've already gotten an update from this root recently, so ignore this one to avoid flooding.
|
||||
case sender.locator.tstamp > t.data.locator.tstamp:
|
||||
// The timestamp was updated, so we need to update locally and send to our peers.
|
||||
updateRoot = true
|
||||
}
|
||||
if updateRoot {
|
||||
if !equiv(&sender.locator, &t.data.locator) {
|
||||
doUpdate = true
|
||||
t.data.seq++
|
||||
t.core.router.reset(nil)
|
||||
}
|
||||
if t.data.locator.tstamp != sender.locator.tstamp {
|
||||
t.time = now
|
||||
}
|
||||
t.data.locator = sender.locator
|
||||
t.parent = sender.port
|
||||
t.core.peers.sendSwitchMsgs(t)
|
||||
}
|
||||
if true || doUpdate {
|
||||
t.updater.Store(&sync.Once{})
|
||||
}
|
||||
return
|
||||
}
|
||||
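The parent-selection hysteresis above reduces to the "faster" counter update rule: earn a point when the candidate's root update beats the current parent's, lose two when it doesn't, and only switch once the counter reaches the threshold. A toy sketch with hypothetical names, showing how many consecutive "faster" observations are needed:

package main

import "fmt"

const fasterThreshold = 240 // mirrors switch_faster_threshold above

// updateFaster applies the reward/penalty rule to a candidate's counter.
func updateFaster(counter uint64, wasFaster bool) uint64 {
	if wasFaster {
		if counter < fasterThreshold {
			return counter + 1
		}
		return fasterThreshold
	}
	if counter > 1 {
		return counter - 2
	}
	return 0
}

func main() {
	var counter uint64
	steps := 0
	for counter < fasterThreshold {
		counter = updateFaster(counter, true)
		steps++
	}
	fmt.Println("candidate could replace the parent after", steps, "consecutive faster updates")
}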
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The rest of these are related to the switch worker
|
||||
|
||||
// This is called via a sync.Once to update the atomically readable subset of switch information that gets used for routing decisions.
|
||||
func (t *switchTable) updateTable() {
|
||||
// WARNING this should only be called from within t.data.updater.Do()
|
||||
// It relies on the sync.Once for synchronization with messages and lookups
|
||||
// TODO use a pre-computed faster lookup table
|
||||
// Instead of checking distance for every destination every time
|
||||
// Array of structs, indexed by first coord that differs from self
|
||||
// Each struct stores the best port to forward to, and a next coord map
|
||||
// Move to struct, then iterate over coord maps until you dead end
|
||||
// The last port before the dead end should be the closest
|
||||
t.mutex.RLock()
|
||||
defer t.mutex.RUnlock()
|
||||
newTable := lookupTable{
|
||||
self: t.data.locator.clone(),
|
||||
elems: make(map[switchPort]tableElem, len(t.data.peers)),
|
||||
}
|
||||
for _, pinfo := range t.data.peers {
|
||||
//if !pinfo.forward { continue }
|
||||
if pinfo.locator.root != newTable.self.root {
|
||||
continue
|
||||
}
|
||||
loc := pinfo.locator.clone()
|
||||
loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
|
||||
newTable.elems[pinfo.port] = tableElem{
|
||||
locator: loc,
|
||||
port: pinfo.port,
|
||||
time: pinfo.time,
|
||||
}
|
||||
}
|
||||
t.table.Store(newTable)
|
||||
}
|
||||
|
||||
// Returns a copy of the atomically-updated table used for switch lookups
|
||||
func (t *switchTable) getTable() lookupTable {
|
||||
t.updater.Load().(*sync.Once).Do(t.updateTable)
|
||||
return t.table.Load().(lookupTable)
|
||||
}
|
||||
|
||||
// Starts the switch worker
|
||||
func (t *switchTable) start() error {
|
||||
t.core.log.Infoln("Starting switch")
|
||||
// There's actually nothing to do to start it...
|
||||
return nil
|
||||
}
|
||||
|
||||
type closerInfo struct {
|
||||
elem tableElem
|
||||
dist int
|
||||
}
|
||||
|
||||
// Returns the set of closer ports and their distances, keeping only ports closer to the destination than this node
|
||||
// If the returned slice is empty (or nil), then no peer is closer
|
||||
func (t *switchTable) getCloser(dest []byte) []closerInfo {
|
||||
table := t.getTable()
|
||||
myDist := table.self.dist(dest)
|
||||
if myDist == 0 {
|
||||
// Skip the iteration step if it's impossible to be closer
|
||||
return nil
|
||||
}
|
||||
t.queues.closer = t.queues.closer[:0]
|
||||
for _, info := range table.elems {
|
||||
dist := info.locator.dist(dest)
|
||||
if dist < myDist {
|
||||
t.queues.closer = append(t.queues.closer, closerInfo{info, dist})
|
||||
}
|
||||
}
|
||||
return t.queues.closer
|
||||
}
|
||||
|
||||
// Returns true if the peer is closer to the destination than ourself
|
||||
func (t *switchTable) portIsCloser(dest []byte, port switchPort) bool {
|
||||
table := t.getTable()
|
||||
if info, isIn := table.elems[port]; isIn {
|
||||
theirDist := info.locator.dist(dest)
|
||||
myDist := table.self.dist(dest)
|
||||
return theirDist < myDist
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Get the coords of a packet without decoding
|
||||
func switch_getPacketCoords(packet []byte) []byte {
|
||||
_, pTypeLen := wire_decode_uint64(packet)
|
||||
coords, _ := wire_decode_coords(packet[pTypeLen:])
|
||||
return coords
|
||||
}
|
||||
|
||||
// Returns a unique string for each stream of traffic
|
||||
// Equal to coords
|
||||
// The sender may append arbitrary info to the end of coords (as long as it begins with a 0x00) to designate separate traffic streams
|
||||
// Currently, it's the IPv6 next header type and the first 2 uint16 of the next header
|
||||
// This is equivalent to the TCP/UDP protocol numbers and the source / dest ports
|
||||
// TODO figure out if something else would make more sense (other transport protocols?)
|
||||
func switch_getPacketStreamID(packet []byte) string {
|
||||
return string(switch_getPacketCoords(packet))
|
||||
}
|
||||
|
||||
// Returns the flowlabel from a given set of coords
|
||||
func switch_getFlowLabelFromCoords(in []byte) []byte {
|
||||
for i, v := range in {
|
||||
if v == 0 {
|
||||
return in[i+1:]
|
||||
}
|
||||
}
|
||||
return []byte{}
|
||||
}
|
||||
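A small, hedged sketch of the stream-ID convention described above: the destination coords are followed by an optional 0x00 separator and an opaque flow label appended by the sender. The example bytes and helper name are invented:

package main

import (
	"bytes"
	"fmt"
)

// flowLabel returns whatever follows the first 0x00 separator, if any.
func flowLabel(coords []byte) []byte {
	if i := bytes.IndexByte(coords, 0); i >= 0 {
		return coords[i+1:]
	}
	return nil
}

func main() {
	coords := []byte{1, 3, 2, 0, 0x11, 0x22} // dest coords, then 0x00 + flow key
	fmt.Printf("dest %v flow % x\n", coords[:3], flowLabel(coords))
}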
|
||||
// Find the best port for a given set of coords
|
||||
func (t *switchTable) bestPortForCoords(coords []byte) switchPort {
|
||||
table := t.getTable()
|
||||
var best switchPort
|
||||
bestDist := table.self.dist(coords)
|
||||
for to, elem := range table.elems {
|
||||
dist := elem.locator.dist(coords)
|
||||
if !(dist < bestDist) {
|
||||
continue
|
||||
}
|
||||
best = to
|
||||
bestDist = dist
|
||||
}
|
||||
return best
|
||||
}
|
||||
|
||||
// Handle an incoming packet
|
||||
// Either send it to ourself, or to the first idle peer that's free
|
||||
// Returns true if the packet has been handled somehow, false if it should be queued
|
||||
func (t *switchTable) _handleIn(packet []byte, idle map[switchPort]struct{}, sending map[switchPort]struct{}) bool {
|
||||
coords := switch_getPacketCoords(packet)
|
||||
closer := t.getCloser(coords)
|
||||
if len(closer) == 0 {
|
||||
// TODO? call the router directly, and remove the whole concept of a self peer?
|
||||
self := t.core.peers.getPorts()[0]
|
||||
self.sendPacketsFrom(t, [][]byte{packet})
|
||||
return true
|
||||
}
|
||||
var best *closerInfo
|
||||
ports := t.core.peers.getPorts()
|
||||
for _, cinfo := range closer {
|
||||
to := ports[cinfo.elem.port]
|
||||
//_, isIdle := idle[cinfo.elem.port]
|
||||
_, isSending := sending[cinfo.elem.port]
|
||||
var update bool
|
||||
switch {
|
||||
case to == nil:
|
||||
// no port was found, ignore it
|
||||
case isSending:
|
||||
// the port is busy, ignore it
|
||||
case best == nil:
|
||||
// this is the first candidate port we've found, so select it until we find a
|
||||
// better candidate port to use instead
|
||||
update = true
|
||||
case cinfo.dist < best.dist:
|
||||
// the port takes a shorter path/is more direct than our current
|
||||
// candidate, so select that instead
|
||||
update = true
|
||||
case cinfo.dist > best.dist:
|
||||
// the port takes a longer path/is less direct than our current candidate,
|
||||
// ignore it
|
||||
case cinfo.elem.locator.tstamp > best.elem.locator.tstamp:
|
||||
// has a newer tstamp from the root, so presumably a better path
|
||||
update = true
|
||||
case cinfo.elem.locator.tstamp < best.elem.locator.tstamp:
|
||||
// has an older tstamp, so presumably a worse path
|
||||
case cinfo.elem.time.Before(best.elem.time):
|
||||
// same tstamp, but got it earlier, so presumably a better path
|
||||
//t.core.log.Println("DEBUG new best:", best.elem.time, cinfo.elem.time)
|
||||
update = true
|
||||
default:
|
||||
// the search for a port has finished
|
||||
}
|
||||
if update {
|
||||
b := cinfo // because cinfo gets mutated by the iteration
|
||||
best = &b
|
||||
}
|
||||
}
|
||||
if best != nil {
|
||||
if _, isIdle := idle[best.elem.port]; isIdle {
|
||||
delete(idle, best.elem.port)
|
||||
ports[best.elem.port].sendPacketsFrom(t, [][]byte{packet})
|
||||
return true
|
||||
}
|
||||
}
|
||||
// Didn't find anyone idle to send it to
|
||||
return false
|
||||
}
|
||||
|
||||
// Info about a buffered packet
|
||||
type switch_packetInfo struct {
|
||||
bytes []byte
|
||||
time time.Time // Timestamp of when the packet arrived
|
||||
}
|
||||
|
||||
// Used to keep track of buffered packets
|
||||
type switch_buffer struct {
|
||||
packets []switch_packetInfo // Currently buffered packets, which may be dropped if it grows too large
|
||||
size uint64 // Total queue size in bytes
|
||||
}
|
||||
|
||||
type switch_buffers struct {
|
||||
totalMaxSize uint64
|
||||
bufs map[string]switch_buffer // Buffers indexed by StreamID
|
||||
size uint64 // Total size of all buffers, in bytes
|
||||
maxbufs int
|
||||
maxsize uint64
|
||||
closer []closerInfo // Scratch space
|
||||
}
|
||||
|
||||
func (b *switch_buffers) _cleanup(t *switchTable) {
|
||||
for streamID, buf := range b.bufs {
|
||||
// Remove queues for which we have no next hop
|
||||
packet := buf.packets[0]
|
||||
coords := switch_getPacketCoords(packet.bytes)
|
||||
if len(t.getCloser(coords)) == 0 {
|
||||
for _, packet := range buf.packets {
|
||||
util.PutBytes(packet.bytes)
|
||||
}
|
||||
b.size -= buf.size
|
||||
delete(b.bufs, streamID)
|
||||
}
|
||||
}
|
||||
|
||||
for b.size > b.totalMaxSize {
|
||||
// Drop a random queue
|
||||
target := rand.Uint64() % b.size
|
||||
var size uint64 // running total
|
||||
for streamID, buf := range b.bufs {
|
||||
size += buf.size
|
||||
if size < target {
|
||||
continue
|
||||
}
|
||||
var packet switch_packetInfo
|
||||
packet, buf.packets = buf.packets[0], buf.packets[1:]
|
||||
buf.size -= uint64(len(packet.bytes))
|
||||
b.size -= uint64(len(packet.bytes))
|
||||
util.PutBytes(packet.bytes)
|
||||
if len(buf.packets) == 0 {
|
||||
delete(b.bufs, streamID)
|
||||
} else {
|
||||
// Need to update the map, since buf was retrieved by value
|
||||
b.bufs[streamID] = buf
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
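The drop loop above picks a victim queue with probability proportional to its size: draw a random byte offset into the total backlog and walk the queues until the running total passes it. A standalone sketch of that selection step, with invented queue sizes and names:

package main

import (
	"fmt"
	"math/rand"
)

// pickVictim returns a stream ID chosen with probability proportional to its size.
func pickVictim(sizes map[string]uint64, total uint64) string {
	target := rand.Uint64() % total
	var running uint64
	for id, size := range sizes {
		running += size
		if running > target {
			return id
		}
	}
	return "" // unreachable if total == sum(sizes)
}

func main() {
	sizes := map[string]uint64{"a": 900, "b": 90, "c": 10}
	counts := map[string]int{}
	for i := 0; i < 10000; i++ {
		counts[pickVictim(sizes, 1000)]++
	}
	fmt.Println(counts) // roughly 90% a, 9% b, 1% c
}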
|
||||
// Handles incoming idle notifications
|
||||
// Loops over the buffered queues and sends the highest-priority packets that are OK for this peer to send
|
||||
// Returns true if the peer is no longer idle, false if it should be added to the idle list
|
||||
func (t *switchTable) _handleIdle(port switchPort) bool {
|
||||
// TODO? only send packets for which this is the best next hop that isn't currently blocked sending
|
||||
to := t.core.peers.getPorts()[port]
|
||||
if to == nil {
|
||||
return true
|
||||
}
|
||||
var packets [][]byte
|
||||
var psize int
|
||||
t.queues._cleanup(t)
|
||||
now := time.Now()
|
||||
for psize < 65535 {
|
||||
var best *string
|
||||
var bestPriority float64
|
||||
for streamID, buf := range t.queues.bufs {
|
||||
// Filter to the streams for which this port is closer to the destination
|
||||
// Keep the one with the best age-to-size priority
|
||||
packet := buf.packets[0]
|
||||
coords := switch_getPacketCoords(packet.bytes)
|
||||
priority := float64(now.Sub(packet.time)) / float64(buf.size)
|
||||
if priority >= bestPriority && t.portIsCloser(coords, port) {
|
||||
b := streamID // copy since streamID is mutated in the loop
|
||||
best = &b
|
||||
bestPriority = priority
|
||||
}
|
||||
}
|
||||
if best != nil {
|
||||
buf := t.queues.bufs[*best]
|
||||
var packet switch_packetInfo
|
||||
// TODO decide if this should be LIFO or FIFO
|
||||
packet, buf.packets = buf.packets[0], buf.packets[1:]
|
||||
buf.size -= uint64(len(packet.bytes))
|
||||
t.queues.size -= uint64(len(packet.bytes))
|
||||
if len(buf.packets) == 0 {
|
||||
delete(t.queues.bufs, *best)
|
||||
} else {
|
||||
// Need to update the map, since buf was retrieved by value
|
||||
t.queues.bufs[*best] = buf
|
||||
}
|
||||
packets = append(packets, packet.bytes)
|
||||
psize += len(packet.bytes)
|
||||
} else {
|
||||
// Finished finding packets
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(packets) > 0 {
|
||||
to.sendPacketsFrom(t, packets)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
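The selection rule above boils down to the priority formula: age of the queue's oldest packet divided by the queue's size in bytes, so small or long-waiting queues drain first. A minimal sketch with invented queues and names:

package main

import (
	"fmt"
	"time"
)

// queue holds just enough state to compute the priority used above.
type queue struct {
	oldest time.Time
	size   uint64
}

// pick returns the stream ID with the highest age/size priority.
func pick(queues map[string]queue, now time.Time) string {
	best, bestPriority := "", -1.0
	for id, q := range queues {
		p := float64(now.Sub(q.oldest)) / float64(q.size)
		if p > bestPriority {
			best, bestPriority = id, p
		}
	}
	return best
}

func main() {
	now := time.Now()
	queues := map[string]queue{
		"bulk":        {oldest: now.Add(-2 * time.Second), size: 4 << 20},
		"interactive": {oldest: now.Add(-50 * time.Millisecond), size: 2 << 10},
	}
	fmt.Println(pick(queues, now)) // "interactive": the tiny queue wins despite being newer
}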
|
||||
func (t *switchTable) packetInFrom(from phony.Actor, bytes []byte) {
|
||||
t.Act(from, func() {
|
||||
t._packetIn(bytes)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *switchTable) _packetIn(bytes []byte) {
|
||||
// Try to send it somewhere (or drop it if it's corrupt or at a dead end)
|
||||
if !t._handleIn(bytes, t.idle, t.sending) {
|
||||
// There's nobody free to take it right now, so queue it for later
|
||||
packet := switch_packetInfo{bytes, time.Now()}
|
||||
streamID := switch_getPacketStreamID(packet.bytes)
|
||||
buf, bufExists := t.queues.bufs[streamID]
|
||||
buf.packets = append(buf.packets, packet)
|
||||
buf.size += uint64(len(packet.bytes))
|
||||
t.queues.size += uint64(len(packet.bytes))
|
||||
// Keep track of the max total queue size
|
||||
if t.queues.size > t.queues.maxsize {
|
||||
t.queues.maxsize = t.queues.size
|
||||
}
|
||||
t.queues.bufs[streamID] = buf
|
||||
if !bufExists {
|
||||
// Keep track of the max total queue count. Only recalculate this
|
||||
// when the queue is new because otherwise repeating len(dict) might
|
||||
// cause unnecessary processing overhead
|
||||
if len(t.queues.bufs) > t.queues.maxbufs {
|
||||
t.queues.maxbufs = len(t.queues.bufs)
|
||||
}
|
||||
}
|
||||
t.queues._cleanup(t)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *switchTable) _idleIn(port switchPort) {
|
||||
// Try to find something to send to this peer
|
||||
delete(t.sending, port)
|
||||
if !t._handleIdle(port) {
|
||||
// Didn't find anything ready to send yet, so stay idle
|
||||
t.idle[port] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *switchTable) _sendingIn(port switchPort) {
|
||||
if _, isIn := t.idle[port]; !isIn {
|
||||
t.sending[port] = struct{}{}
|
||||
}
|
||||
}
|
|
@ -1,494 +0,0 @@
|
|||
package yggdrasil
|
||||
|
||||
// Wire formatting tools
|
||||
// These are all ugly and probably not very secure
|
||||
|
||||
// TODO clean up unused/commented code, and add better comments to whatever is left
|
||||
|
||||
// Packet types, as wire_encode_uint64(type) at the start of each packet
|
||||
|
||||
import (
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
)
|
||||
|
||||
const (
|
||||
wire_Traffic = iota // data being routed somewhere, handle for crypto
|
||||
wire_ProtocolTraffic // protocol traffic, pub keys for crypto
|
||||
wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
|
||||
wire_SwitchMsg // inside link protocol traffic header
|
||||
wire_SessionPing // inside protocol traffic header
|
||||
wire_SessionPong // inside protocol traffic header
|
||||
wire_DHTLookupRequest // inside protocol traffic header
|
||||
wire_DHTLookupResponse // inside protocol traffic header
|
||||
wire_NodeInfoRequest // inside protocol traffic header
|
||||
wire_NodeInfoResponse // inside protocol traffic header
|
||||
)
|
||||
|
||||
// Calls wire_put_uint64 on a nil slice.
|
||||
func wire_encode_uint64(elem uint64) []byte {
|
||||
return wire_put_uint64(elem, nil)
|
||||
}
|
||||
|
||||
// Encode uint64 using a variable length scheme.
|
||||
// Similar to binary.Uvarint, but big-endian.
|
||||
func wire_put_uint64(e uint64, out []byte) []byte {
|
||||
var b [10]byte
|
||||
i := len(b) - 1
|
||||
b[i] = byte(e & 0x7f)
|
||||
for e >>= 7; e != 0; e >>= 7 {
|
||||
i--
|
||||
b[i] = byte(e | 0x80)
|
||||
}
|
||||
return append(out, b[i:]...)
|
||||
}
|
||||
|
||||
// Returns the length of a wire encoded uint64 of this value.
|
||||
func wire_uint64_len(elem uint64) int {
|
||||
l := 1
|
||||
for e := elem >> 7; e > 0; e >>= 7 {
|
||||
l++
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Decode uint64 from a []byte slice.
|
||||
// Returns the decoded uint64 and the number of bytes used.
|
||||
func wire_decode_uint64(bs []byte) (uint64, int) {
|
||||
length := 0
|
||||
elem := uint64(0)
|
||||
for _, b := range bs {
|
||||
elem <<= 7
|
||||
elem |= uint64(b & 0x7f)
|
||||
length++
|
||||
if b&0x80 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return elem, length
|
||||
}
|
||||
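As a worked example of the encoding above (using a local copy of the logic purely for illustration): 300 splits into the 7-bit groups 0000010 and 0101100, which are emitted big-endian as 0x82 (continuation bit set) then 0x2c.

package main

import "fmt"

// encode is a local copy of the big-endian 7-bits-per-byte scheme above.
func encode(e uint64) []byte {
	var b [10]byte
	i := len(b) - 1
	b[i] = byte(e & 0x7f)
	for e >>= 7; e != 0; e >>= 7 {
		i--
		b[i] = byte(e&0x7f) | 0x80
	}
	return b[i:]
}

// decode reverses it, returning the value and the number of bytes consumed.
func decode(bs []byte) (v uint64, n int) {
	for _, b := range bs {
		v = v<<7 | uint64(b&0x7f)
		n++
		if b&0x80 == 0 {
			break
		}
	}
	return v, n
}

func main() {
	bs := encode(300)
	v, n := decode(bs)
	fmt.Printf("% x -> %d (%d bytes)\n", bs, v, n) // 82 2c -> 300 (2 bytes)
}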
|
||||
// Converts an int64 into uint64 so it can be written to the wire.
|
||||
// Non-negative integers are mapped to even integers: 0 -> 0, 1 -> 2, etc.
|
||||
// Negative integers are mapped to odd integers: -1 -> 1, -2 -> 3, etc.
|
||||
// This means the least significant bit is a sign bit.
|
||||
// This is known as zigzag encoding.
|
||||
func wire_intToUint(i int64) uint64 {
|
||||
// signed arithmetic shift
|
||||
return uint64((i >> 63) ^ (i << 1))
|
||||
}
|
||||
|
||||
// Converts uint64 back to int64, generally when being read from the wire.
|
||||
func wire_intFromUint(u uint64) int64 {
|
||||
// non-arithmetic shift
|
||||
return int64((u >> 1) ^ -(u & 1))
|
||||
}
|
||||
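A quick round-trip sketch of the zigzag mapping described above, using standalone copies of the two conversions (hypothetical helper names, not part of this package):

package main

import "fmt"

// zigzagEncode maps signed integers to unsigned ones so that small magnitudes
// become small values: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
func zigzagEncode(i int64) uint64 {
	return uint64((i >> 63) ^ (i << 1)) // arithmetic shift copies the sign bit
}

// zigzagDecode reverses the mapping.
func zigzagDecode(u uint64) int64 {
	return int64((u >> 1) ^ -(u & 1))
}

func main() {
	for _, v := range []int64{0, 1, -1, 2, -2, 1 << 40, -(1 << 40)} {
		e := zigzagEncode(v)
		fmt.Printf("%d -> %d -> %d\n", v, e, zigzagDecode(e))
	}
}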
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Takes coords, returns coords prefixed with encoded coord length.
|
||||
func wire_encode_coords(coords []byte) []byte {
|
||||
coordLen := wire_encode_uint64(uint64(len(coords)))
|
||||
bs := make([]byte, 0, len(coordLen)+len(coords))
|
||||
bs = append(bs, coordLen...)
|
||||
bs = append(bs, coords...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Puts a length prefix and the coords into bs, returns the wire formatted coords.
|
||||
// Useful in hot loops where we don't want to allocate, and we know the later parts of the slice are safe to overwrite.
|
||||
func wire_put_coords(coords []byte, bs []byte) []byte {
|
||||
bs = wire_put_uint64(uint64(len(coords)), bs)
|
||||
bs = append(bs, coords...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Takes a slice that begins with coords (starting with coord length).
|
||||
// Returns a slice of coords and the number of bytes read.
|
||||
// Used as part of various decode() functions for structs.
|
||||
func wire_decode_coords(packet []byte) ([]byte, int) {
|
||||
coordLen, coordBegin := wire_decode_uint64(packet)
|
||||
coordEnd := coordBegin + int(coordLen)
|
||||
if coordBegin == 0 || coordEnd > len(packet) {
|
||||
return nil, 0
|
||||
}
|
||||
return packet[coordBegin:coordEnd], coordEnd
|
||||
}
|
||||
|
||||
// Converts a []uint64 set of coords to a []byte set of coords.
|
||||
func wire_coordsUint64stoBytes(in []uint64) (out []byte) {
|
||||
for _, coord := range in {
|
||||
c := wire_encode_uint64(coord)
|
||||
out = append(out, c...)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Converts a []byte set of coords to a []uint64 set of coords.
|
||||
func wire_coordsBytestoUint64s(in []byte) (out []uint64) {
|
||||
offset := 0
|
||||
for {
|
||||
coord, length := wire_decode_uint64(in[offset:])
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
out = append(out, coord)
|
||||
offset += length
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Encodes a switchMsg into its wire format.
|
||||
func (m *switchMsg) encode() []byte {
|
||||
bs := wire_encode_uint64(wire_SwitchMsg)
|
||||
bs = append(bs, m.Root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(m.TStamp))...)
|
||||
for _, hop := range m.Hops {
|
||||
bs = append(bs, wire_encode_uint64(uint64(hop.Port))...)
|
||||
bs = append(bs, hop.Next[:]...)
|
||||
bs = append(bs, hop.Sig[:]...)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes a wire formatted switchMsg into the struct, returns true if successful.
|
||||
func (m *switchMsg) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
var tstamp uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_SwitchMsg:
|
||||
return false
|
||||
case !wire_chop_slice(m.Root[:], &bs):
|
||||
return false
|
||||
case !wire_chop_uint64(&tstamp, &bs):
|
||||
return false
|
||||
}
|
||||
m.TStamp = wire_intFromUint(tstamp)
|
||||
for len(bs) > 0 {
|
||||
var hop switchMsgHop
|
||||
switch {
|
||||
case !wire_chop_uint64((*uint64)(&hop.Port), &bs):
|
||||
return false
|
||||
case !wire_chop_slice(hop.Next[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(hop.Sig[:], &bs):
|
||||
return false
|
||||
}
|
||||
m.Hops = append(m.Hops, hop)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// A utility function used to copy bytes into a slice and advance the beginning of the source slice, returns true if successful.
|
||||
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
|
||||
if len(*fromSlice) < len(toSlice) {
|
||||
return false
|
||||
}
|
||||
copy(toSlice, *fromSlice)
|
||||
*fromSlice = (*fromSlice)[len(toSlice):]
|
||||
return true
|
||||
}
|
||||
|
||||
// A utility function to extract coords from a slice and advance the source slices, returning true if successful.
|
||||
func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
|
||||
coords, coordLen := wire_decode_coords(*fromSlice)
|
||||
if coordLen == 0 {
|
||||
return false
|
||||
}
|
||||
*toCoords = append((*toCoords)[:0], coords...)
|
||||
*fromSlice = (*fromSlice)[coordLen:]
|
||||
return true
|
||||
}
|
||||
|
||||
// A utility function to extract a wire encoded uint64 into the provided pointer while advancing the start of the source slice, returning true if successful.
|
||||
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
|
||||
dec, decLen := wire_decode_uint64(*fromSlice)
|
||||
if decLen == 0 {
|
||||
return false
|
||||
}
|
||||
*toUInt64 = dec
|
||||
*fromSlice = (*fromSlice)[decLen:]
|
||||
return true
|
||||
}
|
||||
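All of the wire_chop_* helpers above share one shape: copy a field from the front of the buffer and advance the buffer in place, so decoders read as a flat switch of chop calls. A hypothetical, simplified example of the same shape:

package main

import "fmt"

// chopByte copies one byte from the front of *from and advances it, returning
// false if the buffer is too short. Illustrative only, not part of the package.
func chopByte(to *byte, from *[]byte) bool {
	if len(*from) < 1 {
		return false
	}
	*to = (*from)[0]
	*from = (*from)[1:]
	return true
}

func main() {
	buf := []byte{0xab, 0xcd}
	var a, b byte
	ok := chopByte(&a, &buf) && chopByte(&b, &buf)
	fmt.Printf("%v %x %x, %d bytes left\n", ok, a, b, len(buf))
}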
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Wire traffic packets
|
||||
|
||||
// The wire format for ordinary IPv6 traffic encapsulated by the network.
|
||||
type wire_trafficPacket struct {
|
||||
Coords []byte
|
||||
Handle crypto.Handle
|
||||
Nonce crypto.BoxNonce
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// Encodes a wire_trafficPacket into its wire format.
|
||||
func (p *wire_trafficPacket) encode() []byte {
|
||||
bs := util.GetBytes()
|
||||
bs = wire_put_uint64(wire_Traffic, bs)
|
||||
bs = wire_put_coords(p.Coords, bs)
|
||||
bs = append(bs, p.Handle[:]...)
|
||||
bs = append(bs, p.Nonce[:]...)
|
||||
bs = append(bs, p.Payload...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded wire_trafficPacket into the struct, returning true if successful.
|
||||
func (p *wire_trafficPacket) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_Traffic:
|
||||
return false
|
||||
case !wire_chop_coords(&p.Coords, &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.Handle[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.Nonce[:], &bs):
|
||||
return false
|
||||
}
|
||||
p.Payload = append(util.GetBytes(), bs...)
|
||||
return true
|
||||
}
|
||||
|
||||
// The wire format for protocol traffic, such as dht req/res or session ping/pong packets.
|
||||
type wire_protoTrafficPacket struct {
|
||||
Coords []byte
|
||||
ToKey crypto.BoxPubKey
|
||||
FromKey crypto.BoxPubKey
|
||||
Nonce crypto.BoxNonce
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// Encodes a wire_protoTrafficPacket into its wire format.
|
||||
func (p *wire_protoTrafficPacket) encode() []byte {
|
||||
coords := wire_encode_coords(p.Coords)
|
||||
bs := wire_encode_uint64(wire_ProtocolTraffic)
|
||||
bs = append(bs, coords...)
|
||||
bs = append(bs, p.ToKey[:]...)
|
||||
bs = append(bs, p.FromKey[:]...)
|
||||
bs = append(bs, p.Nonce[:]...)
|
||||
bs = append(bs, p.Payload...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded wire_protoTrafficPacket into the struct, returning true if successful.
|
||||
func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_ProtocolTraffic:
|
||||
return false
|
||||
case !wire_chop_coords(&p.Coords, &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.ToKey[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.FromKey[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.Nonce[:], &bs):
|
||||
return false
|
||||
}
|
||||
p.Payload = bs
|
||||
return true
|
||||
}
|
||||
|
||||
// The wire format for link protocol traffic, namely switchMsg.
|
||||
// There are really two layers of this, with the outer layer using permanent keys and the inner layer using ephemeral keys.
|
||||
// The keys themselves are exchanged as part of the connection setup, and then omitted from the packets.
|
||||
// The two layer logic is handled in peers.go, but it's kind of ugly.
|
||||
type wire_linkProtoTrafficPacket struct {
|
||||
Nonce crypto.BoxNonce
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// Encodes a wire_linkProtoTrafficPacket into its wire format.
|
||||
func (p *wire_linkProtoTrafficPacket) encode() []byte {
|
||||
bs := wire_encode_uint64(wire_LinkProtocolTraffic)
|
||||
bs = append(bs, p.Nonce[:]...)
|
||||
bs = append(bs, p.Payload...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded wire_linkProtoTrafficPacket into the struct, returning true if successful.
|
||||
func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_LinkProtocolTraffic:
|
||||
return false
|
||||
case !wire_chop_slice(p.Nonce[:], &bs):
|
||||
return false
|
||||
}
|
||||
p.Payload = bs
|
||||
return true
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Encodes a sessionPing into its wire format.
|
||||
func (p *sessionPing) encode() []byte {
|
||||
var pTypeVal uint64
|
||||
if p.IsPong {
|
||||
pTypeVal = wire_SessionPong
|
||||
} else {
|
||||
pTypeVal = wire_SessionPing
|
||||
}
|
||||
bs := wire_encode_uint64(pTypeVal)
|
||||
//p.sendPermPub used in top level (crypto), so skipped here
|
||||
bs = append(bs, p.Handle[:]...)
|
||||
bs = append(bs, p.SendSesPub[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(p.Tstamp))...)
|
||||
coords := wire_encode_coords(p.Coords)
|
||||
bs = append(bs, coords...)
|
||||
bs = append(bs, wire_encode_uint64(uint64(p.MTU))...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded sessionPing into the struct, returning true if successful.
|
||||
func (p *sessionPing) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
var tstamp uint64
|
||||
var mtu uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_SessionPing && pType != wire_SessionPong:
|
||||
return false
|
||||
//p.sendPermPub used in top level (crypto), so skipped here
|
||||
case !wire_chop_slice(p.Handle[:], &bs):
|
||||
return false
|
||||
case !wire_chop_slice(p.SendSesPub[:], &bs):
|
||||
return false
|
||||
case !wire_chop_uint64(&tstamp, &bs):
|
||||
return false
|
||||
case !wire_chop_coords(&p.Coords, &bs):
|
||||
return false
|
||||
case !wire_chop_uint64(&mtu, &bs):
|
||||
mtu = 1280
|
||||
}
|
||||
p.Tstamp = wire_intFromUint(tstamp)
|
||||
if pType == wire_SessionPong {
|
||||
p.IsPong = true
|
||||
}
|
||||
p.MTU = MTU(mtu)
|
||||
return true
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Encodes a nodeinfoReqRes into its wire format.
|
||||
func (p *nodeinfoReqRes) encode() []byte {
|
||||
var pTypeVal uint64
|
||||
if p.IsResponse {
|
||||
pTypeVal = wire_NodeInfoResponse
|
||||
} else {
|
||||
pTypeVal = wire_NodeInfoRequest
|
||||
}
|
||||
bs := wire_encode_uint64(pTypeVal)
|
||||
bs = wire_put_coords(p.SendCoords, bs)
|
||||
if pTypeVal == wire_NodeInfoResponse {
|
||||
bs = append(bs, p.NodeInfo...)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded nodeinfoReqRes into the struct, returning true if successful.
|
||||
func (p *nodeinfoReqRes) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_NodeInfoRequest && pType != wire_NodeInfoResponse:
|
||||
return false
|
||||
case !wire_chop_coords(&p.SendCoords, &bs):
|
||||
return false
|
||||
}
|
||||
if p.IsResponse = pType == wire_NodeInfoResponse; p.IsResponse {
|
||||
if len(bs) == 0 {
|
||||
return false
|
||||
}
|
||||
p.NodeInfo = make(NodeInfoPayload, len(bs))
|
||||
if !wire_chop_slice(p.NodeInfo[:], &bs) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Encodes a dhtReq into its wire format.
|
||||
func (r *dhtReq) encode() []byte {
|
||||
coords := wire_encode_coords(r.Coords)
|
||||
bs := wire_encode_uint64(wire_DHTLookupRequest)
|
||||
bs = append(bs, coords...)
|
||||
bs = append(bs, r.Dest[:]...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded dhtReq into the struct, returning true if successful.
|
||||
func (r *dhtReq) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_DHTLookupRequest:
|
||||
return false
|
||||
case !wire_chop_coords(&r.Coords, &bs):
|
||||
return false
|
||||
case !wire_chop_slice(r.Dest[:], &bs):
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Encodes a dhtRes into its wire format.
|
||||
func (r *dhtRes) encode() []byte {
|
||||
coords := wire_encode_coords(r.Coords)
|
||||
bs := wire_encode_uint64(wire_DHTLookupResponse)
|
||||
bs = append(bs, coords...)
|
||||
bs = append(bs, r.Dest[:]...)
|
||||
for _, info := range r.Infos {
|
||||
coords = wire_encode_coords(info.coords)
|
||||
bs = append(bs, info.key[:]...)
|
||||
bs = append(bs, coords...)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
// Decodes an encoded dhtRes into the struct, returning true if successful.
|
||||
func (r *dhtRes) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs):
|
||||
return false
|
||||
case pType != wire_DHTLookupResponse:
|
||||
return false
|
||||
case !wire_chop_coords(&r.Coords, &bs):
|
||||
return false
|
||||
case !wire_chop_slice(r.Dest[:], &bs):
|
||||
return false
|
||||
}
|
||||
for len(bs) > 0 {
|
||||
info := dhtInfo{}
|
||||
switch {
|
||||
case !wire_chop_slice(info.key[:], &bs):
|
||||
return false
|
||||
case !wire_chop_coords(&info.coords, &bs):
|
||||
return false
|
||||
}
|
||||
r.Infos = append(r.Infos, &info)
|
||||
}
|
||||
return true
|
||||
}
|