@@ -1,11 +1,12 @@
 package basichost
 
 import (
-	"context"
+	"net"
+	"strconv"
 	"sync"
 
 	goprocess "github.com/jbenet/goprocess"
-	lgbl "github.com/libp2p/go-libp2p-loggables"
+	goprocessctx "github.com/jbenet/goprocess/context"
 	inat "github.com/libp2p/go-libp2p-nat"
 	inet "github.com/libp2p/go-libp2p-net"
 	ma "github.com/multiformats/go-multiaddr"
@@ -37,11 +38,14 @@ func NewNATManager(net inet.Network) NATManager {
 // * closing the natManager closes the nat and its mappings.
 type natManager struct {
 	net   inet.Network
-	natmu sync.RWMutex // guards nat (ready could obviate this mutex, but safety first.)
+	natmu sync.RWMutex
 	nat   *inat.NAT
 
 	ready chan struct{} // closed once the nat is ready to process port mappings
 
+	refreshMu sync.Mutex
+
 	proc goprocess.Process // natManager has a process + children. can be closed.
 }
 
 func newNatManager(net inet.Network) *natManager {
@@ -74,7 +78,6 @@ func (nmgr *natManager) Ready() <-chan struct{} {
 }
 
 func (nmgr *natManager) discoverNAT() {
-
 	nmgr.proc.Go(func(worker goprocess.Process) {
 		// inat.DiscoverNAT blocks until the nat is found or a timeout
 		// is reached. we unfortunately cannot specify timeouts-- the
@@ -87,131 +90,137 @@ func (nmgr *natManager) discoverNAT() {
 		// to avoid leaking resources in a non-obvious way. the only case
 		// this affects is when the daemon is being started up and _immediately_
 		// asked to close. other services are also starting up, so ok to wait.
-		discoverdone := make(chan struct{})
-		var nat *inat.NAT
-		go func() {
-			defer close(discoverdone)
-			nat = inat.DiscoverNAT()
-		}()
-
-		// by this point -- after finding the NAT -- we may have already
-		// be closing. if so, just exit.
-		select {
-		case <-worker.Closing():
-			return
-		case <-discoverdone:
-			if nat == nil { // no nat, or failed to get it.
-				return
-			}
-		}
-
-		// wire up the nat to close when nmgr closes.
-		// nmgr.proc is our parent, and waiting for us.
-		nmgr.proc.AddChild(nat.Process())
-
-		// set the nat.
-		nmgr.natmu.Lock()
-		nmgr.nat = nat
-		nmgr.natmu.Unlock()
-
-		// signal that we're ready to process nat mappings:
-		close(nmgr.ready)
-
-		// sign natManager up for network notifications
-		// we need to sign up here to avoid missing some notifs
-		// before the NAT has been found.
-		nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
-
-		// if any interfaces were brought up while we were setting up
-		// the nat, now is the time to setup port mappings for them.
-		// we release ready, then grab them to avoid losing any. adding
-		// a port mapping is idempotent, so its ok to add the same twice.
-		addrs := nmgr.net.ListenAddresses()
-		for _, addr := range addrs {
-			// we do it async because it's slow and we may want to close beforehand
-			go addPortMapping(nmgr, addr)
-		}
-	})
-}
-
-// NAT returns the natManager's nat object. this may be nil, if
-// (a) the search process is still ongoing, or (b) the search process
-// found no nat. Clients must check whether the return value is nil.
-func (nmgr *natManager) NAT() *inat.NAT {
-	nmgr.natmu.Lock()
-	defer nmgr.natmu.Unlock()
-	return nmgr.nat
-}
-
-func addPortMapping(nmgr *natManager, intaddr ma.Multiaddr) {
-	nat := nmgr.NAT()
-	if nat == nil {
-		panic("natManager addPortMapping called without a nat.")
-	}
-
-	// first, check if the port mapping already exists.
-	for _, mapping := range nat.Mappings() {
-		if mapping.InternalAddr().Equal(intaddr) {
-			return // it exists! return.
-		}
-	}
-
-	ctx := context.TODO()
-	lm := make(lgbl.DeferredMap)
-	lm["internalAddr"] = func() interface{} { return intaddr.String() }
-
-	defer log.EventBegin(ctx, "natMgrAddPortMappingWait", lm).Done()
-
-	select {
-	case <-nmgr.proc.Closing():
-		lm["outcome"] = "cancelled"
-		return // no use.
-	case <-nmgr.ready: // wait until it's ready.
-	}
-
-	// actually start the port map (sub-event because waiting may take a while)
-	defer log.EventBegin(ctx, "natMgrAddPortMapping", lm).Done()
-
-	// get the nat
-	m, err := nat.NewMapping(intaddr)
-	if err != nil {
-		lm["outcome"] = "failure"
-		lm["error"] = err
-		return
-	}
-
-	extaddr, err := m.ExternalAddr()
-	if err != nil {
-		lm["outcome"] = "failure"
-		lm["error"] = err
-		return
-	}
-
-	lm["outcome"] = "success"
-	lm["externalAddr"] = func() interface{} { return extaddr.String() }
-	log.Infof("established nat port mapping: %s <--> %s", intaddr, extaddr)
-}
-
-func rmPortMapping(nmgr *natManager, intaddr ma.Multiaddr) {
-	nat := nmgr.NAT()
-	if nat == nil {
-		panic("natManager rmPortMapping called without a nat.")
-	}
-
-	// list the port mappings (it may be gone on it's own, so we need to
-	// check this list, and not store it ourselves behind the scenes)
-
-	// close mappings for this internal address.
-	for _, mapping := range nat.Mappings() {
-		if mapping.InternalAddr().Equal(intaddr) {
-			mapping.Close()
-		}
-	}
-}
+		natInstance, err := inat.DiscoverNAT(goprocessctx.OnClosingContext(worker))
+		if err != nil {
+			log.Error("DiscoverNAT error:", err)
+			close(nmgr.ready)
+			return
+		}
+
+		nmgr.natmu.Lock()
+		nmgr.nat = natInstance
+		nmgr.natmu.Unlock()
+		close(nmgr.ready)
+
+		// wire up the nat to close when nmgr closes.
+		// nmgr.proc is our parent, and waiting for us.
+		nmgr.proc.AddChild(nmgr.nat.Process())
+
+		// sign natManager up for network notifications
+		// we need to sign up here to avoid missing some notifs
+		// before the NAT has been found.
+		nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
+		nmgr.refresh()
+	})
+}
+
+func (nmgr *natManager) refresh() {
+	nat := nmgr.NAT()
+	if nat == nil {
+		// Nothing to do.
+		return
+	}
+
+	nmgr.proc.Go(func(_ goprocess.Process) {
+		nmgr.refreshMu.Lock()
+		defer nmgr.refreshMu.Unlock()
+
+		ports := map[string]map[int]bool{
+			"tcp": map[int]bool{},
+			"udp": map[int]bool{},
+		}
+		for _, maddr := range nmgr.net.ListenAddresses() {
+			// Strip the IP
+			maIP, rest := ma.SplitFirst(maddr)
+			if maIP == nil || rest == nil {
+				continue
+			}
+
+			switch maIP.Protocol().Code {
+			case ma.P_IP6, ma.P_IP4:
+			default:
+				continue
+			}
+
+			// Only bother if we're listening on a
+			// unicast/unspecified IP.
+			ip := net.IP(maIP.RawValue())
+			if !(ip.IsGlobalUnicast() || ip.IsUnspecified()) {
+				continue
+			}
+
+			// Extract the port/protocol
+			proto, _ := ma.SplitFirst(rest)
+			if proto == nil {
+				continue
+			}
+
+			var protocol string
+			switch proto.Protocol().Code {
+			case ma.P_TCP:
+				protocol = "tcp"
+			case ma.P_UDP:
+				protocol = "udp"
+			default:
+				continue
+			}
+
+			port, err := strconv.ParseUint(proto.Value(), 10, 16)
+			if err != nil {
+				// bug in multiaddr
+				panic(err)
+			}
+			ports[protocol][int(port)] = false
+		}
+
+		var wg sync.WaitGroup
+		defer wg.Wait()
+
+		// Close old mappings
+		for _, m := range nat.Mappings() {
+			mappedPort := m.InternalPort()
+			if _, ok := ports[m.Protocol()][mappedPort]; !ok {
+				// No longer need this mapping.
+				wg.Add(1)
+				go func(m inat.Mapping) {
+					defer wg.Done()
+					m.Close()
+				}(m)
+			} else {
+				// already mapped
+				ports[m.Protocol()][mappedPort] = true
+			}
+		}
+
+		// Create new mappings.
+		for proto, pports := range ports {
+			for port, mapped := range pports {
+				if mapped {
+					continue
+				}
+				wg.Add(1)
+				go func(proto string, port int) {
+					defer wg.Done()
+					_, err := nat.NewMapping(proto, port)
+					if err != nil {
+						log.Errorf("failed to port-map %s port %d: %s", proto, port, err)
+					}
+				}(proto, port)
+			}
+		}
+	})
+}
+
+// NAT returns the natManager's nat object. this may be nil, if
+// (a) the search process is still ongoing, or (b) the search process
+// found no nat. Clients must check whether the return value is nil.
+func (nmgr *natManager) NAT() *inat.NAT {
+	nmgr.natmu.Lock()
+	defer nmgr.natmu.Unlock()
+	return nmgr.nat
+}
 
 // nmgrNetNotifiee implements the network notification listening part
 // of the natManager. this is merely listening to Listen() and ListenClose()
 // events.
 type nmgrNetNotifiee natManager
 
 func (nn *nmgrNetNotifiee) natManager() *natManager {
@@ -219,19 +228,11 @@ func (nn *nmgrNetNotifiee) natManager() *natManager {
 }
 
 func (nn *nmgrNetNotifiee) Listen(n inet.Network, addr ma.Multiaddr) {
-	if nn.natManager().NAT() == nil {
-		return // not ready or doesnt exist.
-	}
-
-	addPortMapping(nn.natManager(), addr)
+	nn.natManager().refresh()
 }
 
 func (nn *nmgrNetNotifiee) ListenClose(n inet.Network, addr ma.Multiaddr) {
-	if nn.natManager().NAT() == nil {
-		return // not ready or doesnt exist.
-	}
-
-	rmPortMapping(nn.natManager(), addr)
+	nn.natManager().refresh()
 }
 
 func (nn *nmgrNetNotifiee) Connected(inet.Network, inet.Conn) {}