Mirror of https://github.com/yggdrasil-network/yggdrasil-go.git
Synced 2025-11-04 11:15:07 +03:00
Merge pull request #92 from Arceliar/backpressure

Use backpressure instead of estimated bandwidth

Commit bbae9ff8e8
3 changed files with 25 additions and 48 deletions
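In brief, the switch no longer ranks candidate next hops by an estimated-bandwidth score (bandwidth divided by distance); it now forwards to the candidate with the lowest cost, defined as the remaining tree distance plus the number of packets currently queued toward that peer, so congested links are avoided without trying to measure throughput. Below is a minimal, self-contained sketch of that selection rule; the candidate struct and pickNextHop function are hypothetical stand-ins for illustration and are not part of this repository.

// Illustrative sketch only: hypothetical types, not the yggdrasil-go API.
package main

import "fmt"

type candidate struct {
	port      int   // hypothetical peer port number
	dist      int64 // remaining tree distance to the destination via this peer
	queueSize int64 // packets currently queued toward this peer
}

// pickNextHop mirrors the idea of the new lookup: minimize
// (expected distance) + (expected queue size) instead of
// maximizing (estimated bandwidth) / (1 + distance).
func pickNextHop(myDist int64, cands []candidate) (best int, found bool) {
	bestCost := int64(^uint64(0) >> 1) // max int64, same sentinel as in the diff
	for _, c := range cands {
		if c.dist >= myDist {
			continue // only forward to peers strictly closer to the destination
		}
		cost := c.dist + c.queueSize
		if cost < bestCost {
			best, bestCost, found = c.port, cost, true
		}
	}
	return best, found
}

func main() {
	cands := []candidate{
		{port: 1, dist: 2, queueSize: 5}, // closer, but backed up
		{port: 2, dist: 3, queueSize: 0}, // farther, but idle
	}
	best, _ := pickNextHop(10, cands)
	fmt.Println("next hop:", best) // prints: next hop: 2
}

In the actual change (see the switch.go lookup hunk below), the same comparison is made over table.elems, reading each peer's queue length via p.getQueueSize().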
				
			
@@ -25,7 +25,6 @@ package yggdrasil
 import "time"
 import "sync"
 import "sync/atomic"
-import "math"
 
 //import "fmt"
 
@@ -86,7 +85,7 @@ func (ps *peers) putPorts(ports map[switchPort]*peer) {
 type peer struct {
 	// Rolling approximation of bandwidth, in bps, used by switch, updated by packet sends
 	// use get/update methods only! (atomic accessors as float64)
-	bandwidth  uint64
+	queueSize  int64
 	bytesSent  uint64 // To track bandwidth usage for getPeers
 	bytesRecvd uint64 // To track bandwidth usage for getPeers
 	// BUG: sync/atomic, 32 bit platforms need the above to be the first element
@@ -116,22 +115,12 @@ type peer struct {
 
 const peer_Throttle = 1
 
-func (p *peer) getBandwidth() float64 {
-	bits := atomic.LoadUint64(&p.bandwidth)
-	return math.Float64frombits(bits)
+func (p *peer) getQueueSize() int64 {
+	return atomic.LoadInt64(&p.queueSize)
 }
 
-func (p *peer) updateBandwidth(bytes int, duration time.Duration) {
-	if p == nil {
-		return
-	}
-	for ok := false; !ok; {
-		oldBits := atomic.LoadUint64(&p.bandwidth)
-		oldBandwidth := math.Float64frombits(oldBits)
-		bandwidth := oldBandwidth*7/8 + float64(bytes)/duration.Seconds()
-		bits := math.Float64bits(bandwidth)
-		ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
-	}
+func (p *peer) updateQueueSize(delta int64) {
+	atomic.AddInt64(&p.queueSize, delta)
 }
 
 func (ps *peers) newPeer(box *boxPubKey,
@@ -12,6 +12,7 @@ package yggdrasil
 //  A little annoying to do with constant changes from bandwidth estimates
 
 import "time"
+import "sort"
 import "sync"
 import "sync/atomic"
 
@@ -401,37 +402,36 @@ func (t *switchTable) updateTable() {
 			port:    pinfo.port,
 		})
 	}
+	sort.SliceStable(newTable.elems, func(i, j int) bool {
+		return t.data.peers[newTable.elems[i].port].firstSeen.Before(t.data.peers[newTable.elems[j].port].firstSeen)
+	})
 	t.table.Store(newTable)
 }
 
 func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
 	t.updater.Load().(*sync.Once).Do(t.updateTable)
 	table := t.table.Load().(lookupTable)
-	ports := t.core.peers.getPorts()
-	getBandwidth := func(port switchPort) float64 {
-		var bandwidth float64
-		if p, isIn := ports[port]; isIn {
-			bandwidth = p.getBandwidth()
-		}
-		return bandwidth
-	}
-	var best switchPort
 	myDist := table.self.dist(dest) //getDist(table.self.coords)
 	if !(uint64(myDist) < ttl) {
 		return 0, 0
 	}
-	// score is in units of bandwidth / distance
-	bestScore := float64(-1)
+	// cost is in units of (expected distance) + (expected queue size), where expected distance is used as an approximation of the minimum backpressure gradient needed for packets to flow
+	ports := t.core.peers.getPorts()
+	var best switchPort
+	bestCost := int64(^uint64(0) >> 1)
 	for _, info := range table.elems {
 		dist := info.locator.dist(dest) //getDist(info.locator.coords)
 		if !(dist < myDist) {
 			continue
 		}
-		score := getBandwidth(info.port)
-		score /= float64(1 + dist)
-		if score > bestScore {
+		p, isIn := ports[info.port]
+		if !isIn {
+			continue
+		}
+		cost := int64(dist) + p.getQueueSize()
+		if cost < bestCost {
 			best = info.port
-			bestScore = score
+			bestCost = cost
 		}
 	}
 	//t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
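To make the cost rule above concrete (illustrative numbers, not part of the commit): a candidate at distance 2 with 5 packets queued costs 2 + 5 = 7, while a candidate at distance 3 with an empty queue costs 3 + 0 = 3, so the farther but idle peer is chosen; once its queue grows by more than the difference in distance, the closer peer wins again.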
@@ -218,26 +218,12 @@ func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
 	buf := bufio.NewWriterSize(sock, tcp_msgSize)
 	send := func(msg []byte) {
 		msgLen := wire_encode_uint64(uint64(len(msg)))
-		before := buf.Buffered()
-		start := time.Now()
 		buf.Write(tcp_msg[:])
 		buf.Write(msgLen)
 		buf.Write(msg)
-		timed := time.Since(start)
-		after := buf.Buffered()
-		written := (before + len(tcp_msg) + len(msgLen) + len(msg)) - after
-		if written > 0 {
-			p.updateBandwidth(written, timed)
-		}
+		p.updateQueueSize(-1)
 		util_putBytes(msg)
 	}
-	flush := func() {
-		size := buf.Buffered()
-		start := time.Now()
-		buf.Flush()
-		timed := time.Since(start)
-		p.updateBandwidth(size, timed)
-	}
 	go func() {
 		var stack [][]byte
 		put := func(msg []byte) {
@@ -245,6 +231,7 @@ func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
 			for len(stack) > 32 {
 				util_putBytes(stack[0])
 				stack = stack[1:]
+				p.updateQueueSize(-1)
 			}
 		}
 		for msg := range out {
@@ -254,7 +241,7 @@ func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
 				select {
 				case msg, ok := <-out:
 					if !ok {
-						flush()
+						buf.Flush()
 						return
 					}
 					put(msg)
@@ -264,13 +251,14 @@ func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
 					send(msg)
 				}
 			}
-			flush()
+			buf.Flush()
 		}
 	}()
 	p.out = func(msg []byte) {
 		defer func() { recover() }()
 		select {
 		case out <- msg:
+			p.updateQueueSize(1)
 		default:
 			util_putBytes(msg)
 		}
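Taken together, the hunks above keep the per-peer queue counter balanced: p.out adds 1 when a message is accepted onto the out channel, send subtracts 1 once the message has been written into the buffer, and put subtracts 1 for each message dropped when the stack exceeds 32 entries, so the value read by the switch's lookup tracks what is actually waiting to be sent.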