Mirror of https://github.com/yggdrasil-network/yggdrasil-go.git (synced 2025-04-29 14:45:07 +03:00)
Simplify reconfiguration
commit fc9a1c6c31 (parent 764f9c8e11)
13 changed files with 64 additions and 141 deletions
@@ -20,6 +20,7 @@ type Core struct {
 	// This is the main data structure that holds everything else for a node
 	// We're going to keep our own copy of the provided config - that way we can
 	// guarantee that it will be covered by the mutex
+	phony.Inbox
 	config config.NodeState // Config
 	boxPub crypto.BoxPubKey
 	boxPriv crypto.BoxPrivKey
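The only addition in this hunk is the embedded phony.Inbox. Embedding the inbox gives Core an Act method, so it satisfies phony.Actor and can be the sender when UpdateConfig (next hunk) hands reconfiguration work to the router and switch table. A minimal sketch of the pattern, not taken from the repository (the counter type and the sender are invented for illustration):

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

// counter embeds phony.Inbox, which provides the Act method and makes the
// type a phony.Actor -- the same pattern this commit applies to Core.
type counter struct {
	phony.Inbox
	n int
}

func main() {
	c := &counter{}
	var sender phony.Inbox // any actor can be the sender; here a bare Inbox

	// Act enqueues the closure; it later runs on c's own goroutine, one
	// message at a time, so c.n needs no mutex.
	c.Act(&sender, func() { c.n++ })

	// phony.Block enqueues a closure and waits for it, and messages are
	// handled in order, so this read observes the increment above.
	var n int
	phony.Block(c, func() { n = c.n })
	fmt.Println(n) // prints 1
}

Because every closure runs on the embedded inbox's own goroutine, state owned by the actor needs no separate locking.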
@@ -112,47 +113,14 @@ func (c *Core) addPeerLoop() {
 // config.NodeConfig and then signals the various module goroutines to
 // reconfigure themselves if needed.
 func (c *Core) UpdateConfig(config *config.NodeConfig) {
-	c.log.Infoln("Reloading node configuration...")
+	c.log.Debugln("Reloading node configuration...")

 	// Replace the active configuration with the supplied one
 	c.config.Replace(*config)
-	errors := 0
-
-	// Each reconfigure function should pass any errors to the channel, then close it
-	components := map[phony.Actor][]func(chan error){
-		&c.router: []func(chan error){
-			c.router.reconfigure,
-			c.router.dht.reconfigure,
-			c.router.searches.reconfigure,
-			c.router.sessions.reconfigure,
-		},
-		&c.switchTable: []func(chan error){
-			c.switchTable.reconfigure,
-			c.link.reconfigure,
-			c.peers.reconfigure,
-		},
-	}
-
-	// TODO: We count errors here but honestly that provides us with absolutely no
-	// benefit over components reporting errors themselves, so maybe we can use
-	// actor.Act() here instead and stop counting errors
-	for actor, functions := range components {
-		for _, function := range functions {
-			response := make(chan error)
-			phony.Block(actor, func() {
-				function(response)
-			})
-			for err := range response {
-				c.log.Errorln(err)
-				errors++
-			}
-		}
-	}
-
-	if errors > 0 {
-		c.log.Warnln(errors, "node module(s) reported errors during configuration reload")
-	} else {
-		c.log.Infoln("Node configuration reloaded successfully")
-	}
+
+	// Notify the router and switch about the new configuration
+	c.router.Act(c, c.router.reconfigure)
+	c.switchTable.Act(c, c.switchTable.reconfigure)
 }

 // Start starts up Yggdrasil using the provided config.NodeConfig, and outputs
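The removed code drove every module's reconfigure function through phony.Block and an error channel, counting failures centrally; the new code simply queues a reconfigure message on the router and switch actors and returns, leaving each module to log its own errors. A rough sketch of the new flow under that reading, with illustrative type names rather than the real ones:

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

// router and switchTable stand in for the real modules; each is an actor.
type router struct{ phony.Inbox }
type switchTable struct{ phony.Inbox }

func (r *router) reconfigure()      { fmt.Println("router reconfigured") }
func (t *switchTable) reconfigure() { fmt.Println("switch reconfigured") }

// core mirrors the shape Core has after this commit: it embeds phony.Inbox so
// that it can be the sender in the Act calls below.
type core struct {
	phony.Inbox
	router      router
	switchTable switchTable
}

// updateConfig queues the reconfigure work on each module and returns without
// waiting, which is the behaviour UpdateConfig has after this change.
func (c *core) updateConfig() {
	c.router.Act(c, c.router.reconfigure)
	c.switchTable.Act(c, c.switchTable.reconfigure)
}

func main() {
	c := &core{}
	c.updateConfig()

	// Block on both actors so the queued reconfigure calls finish before the
	// program exits.
	phony.Block(&c.router, func() {})
	phony.Block(&c.switchTable, func() {})
}

Act returns as soon as the closure is enqueued, so UpdateConfig no longer waits for reconfiguration to complete.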
@@ -82,8 +82,7 @@ func (t *dht) init(r *router) {
 	t.reset()
 }

-func (t *dht) reconfigure(e chan error) {
-	defer close(e)
+func (t *dht) reconfigure() {
 	// This is where reconfiguration would go, if we had anything to do
 }
@@ -79,13 +79,8 @@ func (l *link) init(c *Core) error {
 	return nil
 }

-func (l *link) reconfigure(e chan error) {
-	defer close(e)
-	tcpResponse := make(chan error)
-	l.tcp.reconfigure(tcpResponse)
-	for err := range tcpResponse {
-		e <- err
-	}
+func (l *link) reconfigure() {
+	l.tcp.reconfigure()
 }

 func (l *link) call(uri string, sintf string) error {
@@ -34,8 +34,7 @@ func (ps *peers) init(c *Core) {
 	ps.core = c
 }

-func (ps *peers) reconfigure(e chan error) {
-	defer close(e)
+func (ps *peers) reconfigure() {
 	// This is where reconfiguration would go, if we had anything to do
 }
@@ -73,18 +73,20 @@ func (r *router) init(core *Core) {
 	r.sessions.init(r)
 }

-func (r *router) reconfigure(e chan error) {
-	defer close(e)
-	var errs []error
+// Reconfigures the router and any child modules. This should only ever be run
+// by the router actor.
+func (r *router) reconfigure() {
 	// Reconfigure the router
 	current := r.core.config.GetCurrent()
-	err := r.nodeinfo.setNodeInfo(current.NodeInfo, current.NodeInfoPrivacy)
-	if err != nil {
-		errs = append(errs, err)
-	}
-	for _, err := range errs {
-		e <- err
+	if err := r.nodeinfo.setNodeInfo(current.NodeInfo, current.NodeInfoPrivacy); err != nil {
+		r.core.log.Errorln("Error reloading NodeInfo:", err)
+	} else {
+		r.core.log.Infoln("NodeInfo updated")
 	}
+	// Reconfigure children
+	r.dht.reconfigure()
+	r.searches.reconfigure()
+	r.sessions.reconfigure()
 }

 // Starts the tickerLoop goroutine.
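router.reconfigure reads the active configuration through r.core.config.GetCurrent(), UpdateConfig stores a new one with c.config.Replace, and the tcp hunks further down read Current and Previous directly under config.Mutex. Those call sites imply roughly the following shape for config.NodeState; this is a sketch reconstructed from the diff, not the actual config package:

// Package config here is a sketch reconstructed from the call sites in this
// diff (Replace, GetCurrent, Mutex, Current, Previous); it is not the real
// yggdrasil config package.
package config

import "sync"

// NodeConfig stands in for the full node configuration; only fields that the
// hunks above touch are listed.
type NodeConfig struct {
	Listen          []string
	NodeInfo        map[string]interface{}
	NodeInfoPrivacy bool
}

// NodeState keeps the node's own copy of the configuration, guarded by a
// mutex, as the comment on the Core struct describes.
type NodeState struct {
	Current  NodeConfig
	Previous NodeConfig
	Mutex    sync.RWMutex
}

// Replace swaps in a new configuration and remembers the old one, so that
// callers such as tcp.reconfigure can diff Previous against Current.
func (s *NodeState) Replace(n NodeConfig) {
	s.Mutex.Lock()
	defer s.Mutex.Unlock()
	s.Previous = s.Current
	s.Current = n
}

// GetCurrent returns a copy of the active configuration under a read lock,
// which is what router.reconfigure calls above.
func (s *NodeState) GetCurrent() NodeConfig {
	s.Mutex.RLock()
	defer s.Mutex.RUnlock()
	return s.Current
}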
@@ -55,8 +55,7 @@ func (s *searches) init(r *router) {
 	s.searches = make(map[crypto.NodeID]*searchInfo)
 }

-func (s *searches) reconfigure(e chan error) {
-	defer close(e)
+func (s *searches) reconfigure() {
 	// This is where reconfiguration would go, if we had anything to do
 }
@@ -73,8 +73,7 @@ type sessionInfo struct {
 	callbacks []chan func() // Finished work from crypto workers
 }

-func (sinfo *sessionInfo) reconfigure(e chan error) {
-	defer close(e)
+func (sinfo *sessionInfo) reconfigure() {
 	// This is where reconfiguration would go, if we had anything to do
 }
@@ -161,17 +160,9 @@ func (ss *sessions) init(r *router) {
 	ss.lastCleanup = time.Now()
 }

-func (ss *sessions) reconfigure(e chan error) {
-	defer close(e)
-	responses := make(map[crypto.Handle]chan error)
-	for index, session := range ss.sinfos {
-		responses[index] = make(chan error)
-		session.reconfigure(responses[index])
-	}
-	for _, response := range responses {
-		for err := range response {
-			e <- err
-		}
+func (ss *sessions) reconfigure() {
+	for _, session := range ss.sinfos {
+		session.reconfigure()
 	}
 }
@@ -199,9 +199,10 @@ func (t *switchTable) init(core *Core) {
 	})
 }

-func (t *switchTable) reconfigure(e chan error) {
-	defer close(e)
+func (t *switchTable) reconfigure() {
 	// This is where reconfiguration would go, if we had anything useful to do.
+	t.core.link.reconfigure()
+	t.core.peers.reconfigure()
 }

 // Safely gets a copy of this node's locator.
@@ -95,8 +95,7 @@ func (t *tcp) init(l *link) error {
 	return nil
 }

-func (t *tcp) reconfigure(e chan error) {
-	defer close(e)
+func (t *tcp) reconfigure() {
 	t.link.core.config.Mutex.RLock()
 	added := util.Difference(t.link.core.config.Current.Listen, t.link.core.config.Previous.Listen)
 	deleted := util.Difference(t.link.core.config.Previous.Listen, t.link.core.config.Current.Listen)
@@ -107,7 +106,9 @@ func (t *tcp) reconfigure(e chan error) {
 			continue
 		}
 		if _, err := t.listen(a[6:]); err != nil {
-			e <- err
+			t.link.core.log.Errorln("Error adding TCP", a[6:], "listener:", err)
+		} else {
+			t.link.core.log.Infoln("Started TCP listener:", a[6:])
 		}
 	}
 	for _, d := range deleted {
@@ -118,6 +119,7 @@ func (t *tcp) reconfigure(e chan error) {
 		if listener, ok := t.listeners[d[6:]]; ok {
 			t.mutex.Unlock()
 			listener.Stop <- true
+			t.link.core.log.Infoln("Stopped TCP listener:", d[6:])
 		} else {
 			t.mutex.Unlock()
 		}
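tcp.reconfigure diffs the old and new Listen lists with util.Difference, starts listeners for addresses that only appear in the new list, and stops listeners for addresses that only appear in the old one (a[6:] strips the 6-character "tcp://" scheme). A stand-in difference helper showing that add/remove decision; the helper and the sample URIs are illustrative, not the real util package:

package main

import "fmt"

// difference returns the elements of a that are not present in b; the real
// util.Difference is assumed to behave similarly but may differ in detail.
func difference(a, b []string) []string {
	seen := make(map[string]struct{}, len(b))
	for _, s := range b {
		seen[s] = struct{}{}
	}
	var out []string
	for _, s := range a {
		if _, ok := seen[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	previous := []string{"tcp://[::]:12345"}
	current := []string{"tcp://[::]:12345", "tcp://0.0.0.0:54321"}

	added := difference(current, previous)   // listeners to start
	deleted := difference(previous, current) // listeners to stop

	for _, a := range added {
		// The real code strips the "tcp://" scheme with a[6:] before listening.
		fmt.Println("would start TCP listener on", a[6:])
	}
	for _, d := range deleted {
		fmt.Println("would stop TCP listener on", d[6:])
	}
}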