Commit 51fd99e3 authored by Jeromy

extract from 0.4.0

parent 5a0162c7
package mocknet

import (
+	"sync"
	"time"
)

// A ratelimiter is used by a link to determine how long to wait before sending
// data given a bandwidth cap.
type ratelimiter struct {
+	lock         sync.Mutex
	bandwidth    float64 // bytes per nanosecond
	allowance    float64 // in bytes
	maxAllowance float64 // in bytes
@@ -29,6 +31,8 @@ func NewRatelimiter(bandwidth float64) *ratelimiter {

// Changes bandwidth of a ratelimiter and resets its allowance
func (r *ratelimiter) UpdateBandwidth(bandwidth float64) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
	// Convert bandwidth from bytes/second to bytes/nanosecond
	b := bandwidth / float64(time.Second)
	r.bandwidth = b
@@ -40,6 +44,8 @@ func (r *ratelimiter) UpdateBandwidth(bandwidth float64) {

// Returns how long to wait before sending data with length 'dataSize' bytes
func (r *ratelimiter) Limit(dataSize int) time.Duration {
+	r.lock.Lock()
+	defer r.lock.Unlock()
	// update time
	var duration time.Duration = time.Duration(0)
	if r.bandwidth == 0 {
...
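The lock added above matters because a mocknet link's ratelimiter can be hit by several writers at once; without it, concurrent Limit calls race on the shared allowance. A minimal sketch of the now-safe access pattern, assuming only the NewRatelimiter/Limit API visible in this diff:

	r := NewRatelimiter(1024 * 1024) // cap at roughly 1 MiB/s
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Limit now locks internally, so concurrent callers are fine.
			time.Sleep(r.Limit(4096)) // wait long enough to respect the cap
		}()
	}
	wg.Wait()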
@@ -3,14 +3,14 @@ package addrutil

import (
	"fmt"

-	logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0"
+	logging "QmWRypnfEwrgH4k93KEHN5hng7VjKYkWmzDYRuTZeh2Mgh/go-log"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	manet "github.com/jbenet/go-multiaddr-net"
+	context "golang.org/x/net/context"
)

-var log = logging.Logger("p2p/net/swarm/addr")
+var log = logging.Logger("github.com/ipfs/go-libp2p/p2p/net/swarm/addr")

// SupportedTransportStrings is the list of supported transports for the swarm.
// These are strings of encapsulated multiaddr protocols. E.g.:
...
@@ -3,8 +3,8 @@ package addrutil

import (
	"testing"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
+	ma "github.com/jbenet/go-multiaddr"
+	manet "github.com/jbenet/go-multiaddr-net"
)

func newMultiaddr(t *testing.T, s string) ma.Multiaddr {
...
@@ -2,6 +2,7 @@ package swarm

import (
	"net"
+	"sort"
	"sync"
	"testing"
	"time"
@@ -9,12 +10,12 @@ import (
	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
-	testutil "github.com/ipfs/go-ipfs/util/testutil"
-	ci "github.com/ipfs/go-ipfs/util/testutil/ci"
+	testutil "util/testutil"
+	ci "util/testutil/ci"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	manet "github.com/jbenet/go-multiaddr-net"
+	context "golang.org/x/net/context"
)

func acceptAndHang(l net.Listener) {
@@ -419,18 +420,18 @@ func TestDialBackoffClears(t *testing.T) {
	}
	s1.peers.AddAddrs(s2.local, ifaceAddrs1, peer.PermanentAddrTTL)

-	before = time.Now()
+	if _, err := s1.Dial(ctx, s2.local); err == nil {
+		t.Fatal("should have failed to dial backed off peer")
+	}
+
+	time.Sleep(baseBackoffTime)
+
	if c, err := s1.Dial(ctx, s2.local); err != nil {
		t.Fatal(err)
	} else {
		c.Close()
		t.Log("correctly connected")
	}
-	duration = time.Now().Sub(before)
-
-	if duration >= dt {
-		// t.Error("took too long", duration, dt)
-	}

	if s1.backf.Backoff(s2.local) {
		t.Error("s2 should no longer be on backoff")
@@ -438,3 +439,38 @@ func TestDialBackoffClears(t *testing.T) {
		t.Log("correctly cleared backoff")
	}
}
+
+func mkAddr(t *testing.T, s string) ma.Multiaddr {
+	a, err := ma.NewMultiaddr(s)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return a
+}
+
+func TestAddressSorting(t *testing.T) {
+	u1 := mkAddr(t, "/ip4/152.12.23.53/udp/1234/utp")
+	u2l := mkAddr(t, "/ip4/127.0.0.1/udp/1234/utp")
+	local := mkAddr(t, "/ip4/127.0.0.1/tcp/1234")
+	norm := mkAddr(t, "/ip4/6.5.4.3/tcp/1234")
+
+	l := AddrList{local, u1, u2l, norm}
+	sort.Sort(l)
+
+	if !l[0].Equal(u2l) {
+		t.Fatal("expected utp local addr to be sorted first: ", l[0])
+	}
+	if !l[1].Equal(u1) {
+		t.Fatal("expected utp addr to be sorted second")
+	}
+	if !l[2].Equal(local) {
+		t.Fatal("expected tcp localhost addr third")
+	}
+	if !l[3].Equal(norm) {
+		t.Fatal("expected normal addr last")
+	}
+}
@@ -5,8 +5,8 @@ import (
	peer "github.com/ipfs/go-libp2p/p2p/peer"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
)

func TestPeers(t *testing.T) {
...
package swarm

import (
+	"runtime"
	"sync"
	"testing"
	"time"

-	ci "github.com/ipfs/go-ipfs/util/testutil/ci"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
+	ci "util/testutil/ci"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
)

func TestSimultOpen(t *testing.T) {
@@ -49,7 +50,8 @@ func TestSimultOpenMany(t *testing.T) {
	addrs := 20
	rounds := 10
-	if ci.IsRunning() {
+	if ci.IsRunning() || runtime.GOOS == "darwin" {
+		// osx has a limit of 256 file descriptors
		addrs = 10
		rounds = 5
	}
...
@@ -7,22 +7,26 @@ import (
	"sync"
	"time"

-	logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0"
+	metrics "github.com/ipfs/go-libp2p/p2p/metrics"
+	mconn "github.com/ipfs/go-libp2p/p2p/metrics/conn"
	inet "github.com/ipfs/go-libp2p/p2p/net"
+	conn "github.com/ipfs/go-libp2p/p2p/net/conn"
	filter "github.com/ipfs/go-libp2p/p2p/net/filter"
	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"
+	transport "github.com/ipfs/go-libp2p/p2p/net/transport"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
-	metrics "github.com/ipfs/go-libp2p/util/metrics"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
-	pst "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer"
-	psy "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/yamux"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
-	goprocessctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
-	prom "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus"
-	mafilter "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/multiaddr-filter"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	ps "github.com/jbenet/go-peerstream"
+	pst "github.com/jbenet/go-stream-muxer"
+	psmss "github.com/jbenet/go-stream-muxer/multistream"
+	"github.com/jbenet/goprocess"
+	goprocessctx "github.com/jbenet/goprocess/context"
+	prom "github.com/prometheus/client_golang/prometheus"
+	mafilter "github.com/whyrusleeping/multiaddr-filter"
+	context "golang.org/x/net/context"
+	logging "QmWRypnfEwrgH4k93KEHN5hng7VjKYkWmzDYRuTZeh2Mgh/go-log"
)

var log = logging.Logger("swarm2")
@@ -37,9 +41,7 @@ var peersTotal = prom.NewGaugeVec(prom.GaugeOpts{
}, []string{"peer_id"})

func init() {
-	tpt := *psy.DefaultTransport
-	tpt.MaxStreamWindowSize = 512 * 1024
-	PSTransport = &tpt
+	PSTransport = psmss.NewTransport()
}

// Swarm is a connection muxer, allowing connections to other peers to
@@ -58,12 +60,19 @@ type Swarm struct {
	backf dialbackoff
	dialT time.Duration // mainly for tests

+	dialer *conn.Dialer
+
	notifmu sync.RWMutex
	notifs  map[inet.Notifiee]ps.Notifiee

+	transports []transport.Transport
+
	// filters for addresses that shouldnt be dialed
	Filters *filter.Filters

+	// file descriptor rate limited
+	fdRateLimit chan struct{}
+
	proc goprocess.Process
	ctx  context.Context
	bwc  metrics.Reporter
@@ -78,15 +87,22 @@ func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
		return nil, err
	}

+	wrap := func(c transport.Conn) transport.Conn {
+		return mconn.WrapConn(bwc, c)
+	}
+
	s := &Swarm{
		swarm:  ps.NewSwarm(PSTransport),
		local:  local,
		peers:  peers,
		ctx:    ctx,
		dialT:  DialTimeout,
		notifs: make(map[inet.Notifiee]ps.Notifiee),
-		bwc:     bwc,
-		Filters: filter.NewFilters(),
+		transports:  []transport.Transport{transport.NewTCPTransport()},
+		bwc:         bwc,
+		fdRateLimit: make(chan struct{}, concurrentFdDials),
+		Filters:     filter.NewFilters(),
+		dialer:      conn.NewDialer(local, peers.PrivKey(local), wrap),
	}

	// configure Swarm
@@ -97,7 +113,12 @@ func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
	prom.MustRegisterOrGet(peersTotal)
	s.Notify((*metricsNotifiee)(s))

-	return s, s.listen(listenAddrs)
+	err = s.setupInterfaces(listenAddrs)
+	if err != nil {
+		return nil, err
+	}
+
+	return s, nil
}

func (s *Swarm) teardown() error {
@@ -130,7 +151,7 @@ func (s *Swarm) Listen(addrs ...ma.Multiaddr) error {
		return err
	}

-	return s.listen(addrs)
+	return s.setupInterfaces(addrs)
}

// Process returns the Process of the swarm
...
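The net effect of this hunk: a Swarm now owns an explicit transport list and one shared conn.Dialer (wrapped for bandwidth metrics) instead of building a dialer per dial, and construction fails outright if no interface could be set up. A rough sketch of the resulting call path, with ctx, listenAddrs, localID, peerstore and the metrics reporter assumed to be in scope:

	s, err := NewSwarm(ctx, listenAddrs, localID, peerstore, bwc)
	if err != nil {
		return err // setupInterfaces failed on every listen address
	}
	defer s.Close() // shuts down the swarm's process, listeners included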
@@ -4,7 +4,7 @@ import (
	conn "github.com/ipfs/go-libp2p/p2p/net/conn"
	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
+	ma "github.com/jbenet/go-multiaddr"
)

// ListenAddresses returns a list of addresses at which this swarm listens.
...
@@ -3,13 +3,13 @@ package swarm

import (
	"testing"

-	testutil "github.com/ipfs/go-ipfs/util/testutil"
+	metrics "github.com/ipfs/go-libp2p/p2p/metrics"
	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
-	metrics "github.com/ipfs/go-libp2p/util/metrics"
+	testutil "util/testutil"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
)

func TestFilterAddrs(t *testing.T) {
...
@@ -8,9 +8,9 @@ import (
	conn "github.com/ipfs/go-libp2p/p2p/net/conn"
	peer "github.com/ipfs/go-libp2p/p2p/peer"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	ps "github.com/jbenet/go-peerstream"
+	context "golang.org/x/net/context"
)

// a Conn is a simple wrapper around a ps.Conn that also exposes
...
package swarm

import (
+	"bytes"
	"errors"
	"fmt"
-	"math/rand"
-	"net"
+	"sort"
	"sync"
	"time"

+	"github.com/jbenet/go-multiaddr-net"
+
	conn "github.com/ipfs/go-libp2p/p2p/net/conn"
	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
-	lgbl "github.com/ipfs/go-libp2p/util/eventlog/loggables"
-	mconn "github.com/ipfs/go-libp2p/util/metrics/conn"
+	lgbl "util/eventlog/loggables"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
-	process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
-	processctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
-	ratelimit "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
)
// Diagram of dial sync:
@@ -44,6 +40,9 @@ var (
// add loop back in Dial(.)
const dialAttempts = 1

+// number of concurrent outbound dials over transports that consume file descriptors
+const concurrentFdDials = 160
+
// DialTimeout is the amount of time each dial attempt has. We can think about making
// this larger down the road, or putting more granular timeouts (i.e. within each
// subcomponent of Dial)
@@ -115,6 +114,7 @@ func (ds *dialsync) Unlock(dst peer.ID) {
	if !found {
		panic("called dialDone with no ongoing dials to peer: " + dst.Pretty())
	}
+
	delete(ds.ongoing, dst) // remove ongoing dial
	close(wait)             // release everyone else
	ds.lock.Unlock()
@@ -145,44 +145,71 @@ func (ds *dialsync) Unlock(dst peer.ID) {
//	dialbackoff.Clear(p)
// }
//

type dialbackoff struct {
-	entries map[peer.ID]struct{}
+	entries map[peer.ID]*backoffPeer
	lock    sync.RWMutex
}

+type backoffPeer struct {
+	tries int
+	until time.Time
+}
+
func (db *dialbackoff) init() {
	if db.entries == nil {
-		db.entries = make(map[peer.ID]struct{})
+		db.entries = make(map[peer.ID]*backoffPeer)
	}
}

// Backoff returns whether the client should backoff from dialing
-// peeer p
-func (db *dialbackoff) Backoff(p peer.ID) bool {
+// peer p
+func (db *dialbackoff) Backoff(p peer.ID) (backoff bool) {
	db.lock.Lock()
+	defer db.lock.Unlock()
	db.init()
-	_, found := db.entries[p]
-	db.lock.Unlock()
-	return found
+	bp, found := db.entries[p]
+	if found && time.Now().Before(bp.until) {
+		return true
+	}
+
+	return false
}

+const baseBackoffTime = time.Second * 5
+const maxBackoffTime = time.Minute * 5
+
// AddBackoff lets other nodes know that we've entered backoff with
// peer p, so dialers should not wait unnecessarily. We still will
// attempt to dial with one goroutine, in case we get through.
func (db *dialbackoff) AddBackoff(p peer.ID) {
	db.lock.Lock()
+	defer db.lock.Unlock()
	db.init()
-	db.entries[p] = struct{}{}
-	db.lock.Unlock()
+	bp, ok := db.entries[p]
+	if !ok {
+		db.entries[p] = &backoffPeer{
+			tries: 1,
+			until: time.Now().Add(baseBackoffTime),
+		}
+		return
+	}
+
+	expTimeAdd := time.Second * time.Duration(bp.tries*bp.tries)
+	if expTimeAdd > maxBackoffTime {
+		expTimeAdd = maxBackoffTime
+	}
+	bp.until = time.Now().Add(baseBackoffTime + expTimeAdd)
+	bp.tries++
}

// Clear removes a backoff record. Clients should call this after a
// successful Dial.
func (db *dialbackoff) Clear(p peer.ID) {
	db.lock.Lock()
+	defer db.lock.Unlock()
	db.init()
	delete(db.entries, p)
-	db.lock.Unlock()
}
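Taken together, Backoff and AddBackoff implement a capped quadratic backoff: the first failure blocks dialing for baseBackoffTime, and each further failure adds tries² seconds, capped at maxBackoffTime. A small sketch that reproduces the schedule (an illustration, not code from this commit):

	base := time.Second * 5
	max := time.Minute * 5
	for tries := 0; tries <= 20; tries++ {
		extra := time.Second * time.Duration(tries*tries)
		if extra > max {
			extra = max
		}
		// windows: 5s, 6s, 9s, 14s, 21s, ... plateauing at 5m5s around 18 tries
		fmt.Println(tries, base+extra)
	}

Because Backoff only reports true while time.Now() is before the stored deadline, a backed-off peer becomes dialable again automatically once its window lapses, even if Clear is never called.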
// Dial connects to a peer.
@@ -225,14 +252,20 @@ func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error)

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
+		defer s.dsync.Unlock(p)
+
+		// if this peer has been backed off, lets get out of here
+		if s.backf.Backoff(p) {
+			log.Event(ctx, "swarmDialBackoff", logdial)
+			return nil, ErrDialBackoff
+		}
+
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.
		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
-		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
@@ -287,14 +320,6 @@ func (s *Swarm) dial(ctx context.Context, p peer.ID) (*Conn, error) {
		log.Debug("Dial not given PrivateKey, so WILL NOT SECURE conn.")
	}

-	// get our own addrs. try dialing out from our listener addresses (reusing ports)
-	// Note that using our peerstore's addresses here is incorrect, as that would
-	// include observed addresses. TODO: make peerstore's address book smarter.
-	localAddrs := s.ListenAddresses()
-	if len(localAddrs) == 0 {
-		log.Debug("Dialing out with no local addresses.")
-	}
-
	// get remote peer addrs
	remoteAddrs := s.peers.Addrs(p)
	// make sure we can use the addresses.
@@ -319,23 +344,8 @@ func (s *Swarm) dial(ctx context.Context, p peer.ID) (*Conn, error) {
		return nil, err
	}

-	// open connection to peer
-	d := &conn.Dialer{
-		Dialer: manet.Dialer{
-			Dialer: net.Dialer{
-				Timeout: s.dialT,
-			},
-		},
-		LocalPeer:  s.local,
-		LocalAddrs: localAddrs,
-		PrivateKey: sk,
-		Wrapper: func(c manet.Conn) manet.Conn {
-			return mconn.WrapConn(s.bwc, c)
-		},
-	}
-
	// try to get a connection to any addr
-	connC, err := s.dialAddrs(ctx, d, p, remoteAddrs)
+	connC, err := s.dialAddrs(ctx, p, remoteAddrs)
	if err != nil {
		logdial["error"] = err
		return nil, err
@@ -355,7 +365,10 @@ func (s *Swarm) dial(ctx context.Context, p peer.ID) (*Conn, error) {
	return swarmC, nil
}

-func (s *Swarm) dialAddrs(ctx context.Context, d *conn.Dialer, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {
+func (s *Swarm) dialAddrs(ctx context.Context, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {
+	// sort addresses so preferred addresses are dialed sooner
+	sort.Sort(AddrList(remoteAddrs))
+
	// try to connect to one of the peer's known addresses.
	// we dial concurrently to each of the addresses, which:
@@ -367,78 +380,89 @@ func (s *Swarm) dialAddrs(ctx context.Context, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancel work when we exit func

-	foundConn := make(chan struct{})
-	conns := make(chan conn.Conn, len(remoteAddrs))
+	conns := make(chan conn.Conn)
	errs := make(chan error, len(remoteAddrs))

	// dialSingleAddr is used in the rate-limited async thing below.
	dialSingleAddr := func(addr ma.Multiaddr) {
-		connC, err := s.dialAddr(ctx, d, p, addr)
+		// rebind chans in scope so we can nil them out easily
+		connsout := conns
+		errsout := errs
+
+		connC, err := s.dialAddr(ctx, p, addr)
+		if err != nil {
+			connsout = nil
+		} else if connC == nil {
+			// NOTE: this really should never happen
+			log.Errorf("failed to dial %s %s and got no error!", p, addr)
+			err = fmt.Errorf("failed to dial %s %s", p, addr)
+			connsout = nil
+		} else {
+			errsout = nil
+		}

		// check parent still wants our results
		select {
-		case <-foundConn:
+		case <-ctx.Done():
			if connC != nil {
				connC.Close()
			}
-			return
-		default:
-		}
-
-		if err != nil {
-			errs <- err
-		} else if connC == nil {
-			errs <- fmt.Errorf("failed to dial %s %s", p, addr)
-		} else {
-			conns <- connC
+		case errsout <- err:
+		case connsout <- connC:
		}
	}

	// this whole thing is in a goroutine so we can use foundConn
	// to end early.
	go func() {
-		// rate limiting just in case. at most 10 addrs at once.
-		limiter := ratelimit.NewRateLimiter(process.Background(), 10)
-		limiter.Go(func(worker process.Process) {
-			// permute addrs so we try different sets first each time.
-			for _, i := range rand.Perm(len(remoteAddrs)) {
-				select {
-				case <-foundConn: // if one of them succeeded already
-					break
-				case <-worker.Closing(): // our context was cancelled
-					break
-				default:
-				}
-
-				workerAddr := remoteAddrs[i] // shadow variable to avoid race
-				limiter.LimitedGo(func(worker process.Process) {
-					dialSingleAddr(workerAddr)
-				})
-			}
-		})
-
-		processctx.CloseAfterContext(limiter, ctx)
+		limiter := make(chan struct{}, 8)
+		for _, addr := range remoteAddrs {
+			// returns whatever ratelimiting is acceptable for workerAddr.
+			// may not rate limit at all.
+			rl := s.addrDialRateLimit(addr)
+			select {
+			case <-ctx.Done(): // our context was cancelled
				return
+			case rl <- struct{}{}:
+				// take the token, move on
+			}
+
+			select {
+			case <-ctx.Done(): // our context was cancelled
+				return
+			case limiter <- struct{}{}:
+				// take the token, move on
+			}
+
+			go func(rlc <-chan struct{}, a ma.Multiaddr) {
+				dialSingleAddr(a)
+				<-limiter
+				<-rlc
+			}(rl, addr)
+		}
	}()

-	// wair fot the results.
+	// wait for the results.
	exitErr := fmt.Errorf("failed to dial %s", p)
-	for i := 0; i < len(remoteAddrs); i++ {
+	for range remoteAddrs {
		select {
		case exitErr = <-errs: //
			log.Debug("dial error: ", exitErr)
		case connC := <-conns:
			// take the first + return asap
-			close(foundConn)
			return connC, nil
+		case <-ctx.Done():
+			// break out and return error
+			break
		}
	}
	return nil, exitErr
}
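The connsout/errsout rebinding in dialSingleAddr is the nil-channel select idiom: a send on a nil channel blocks forever, so nilling out one of the two channels guarantees the final select delivers the result on exactly one of them, or drops it when the context is done. The idiom in isolation, detached from the swarm types (a sketch, names mine):

	// exactly one of v/err is delivered; ctx cancellation drops both.
	func report(ctx context.Context, vals chan<- int, errs chan<- error, v int, err error) {
		valsout, errsout := vals, errs
		if err != nil {
			valsout = nil // send on a nil channel blocks forever: case disabled
		} else {
			errsout = nil
		}
		select {
		case <-ctx.Done():
		case valsout <- v:
		case errsout <- err:
		}
	}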
-func (s *Swarm) dialAddr(ctx context.Context, d *conn.Dialer, p peer.ID, addr ma.Multiaddr) (conn.Conn, error) {
+func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (conn.Conn, error) {
	log.Debugf("%s swarm dialing %s %s", s.local, p, addr)

-	connC, err := d.Dial(ctx, addr, p)
+	connC, err := s.dialer.Dial(ctx, addr, p)
	if err != nil {
		return nil, fmt.Errorf("%s --> %s dial attempt failed: %s", s.local, p, err)
	}
@@ -491,3 +515,72 @@ func dialConnSetup(ctx context.Context, s *Swarm, connC conn.Conn) (*Conn, error
	return swarmC, err
}
+
+// addrDialRateLimit returns a ratelimiting channel for dialing transport
+// addrs like a. for example, tcp is fd-ratelimited. utp is not ratelimited.
+func (s *Swarm) addrDialRateLimit(a ma.Multiaddr) chan struct{} {
+	if isFDCostlyTransport(a) {
+		return s.fdRateLimit
+	}
+
+	// do not rate limit it at all
+	return make(chan struct{}, 1)
+}
+
+func isFDCostlyTransport(a ma.Multiaddr) bool {
+	return isTCPMultiaddr(a)
+}
+
+func isTCPMultiaddr(a ma.Multiaddr) bool {
+	p := a.Protocols()
+	return len(p) == 2 && (p[0].Name == "ip4" || p[0].Name == "ip6") && p[1].Name == "tcp"
+}
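The rate limit is a plain token channel: every TCP dial must place a token into the shared s.fdRateLimit channel (capacity concurrentFdDials, i.e. 160) before proceeding, while non-fd transports get a throwaway single-slot channel and never contend. The pattern on its own (a sketch, names mine):

	fdTokens := make(chan struct{}, 160) // shared across all fd-consuming dials
	dialWithLimit := func(dial func()) {
		fdTokens <- struct{}{}        // blocks once 160 dials are in flight
		defer func() { <-fdTokens }() // free the slot when done
		dial()
	}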
+type AddrList []ma.Multiaddr
+
+func (al AddrList) Len() int {
+	return len(al)
+}
+
+func (al AddrList) Swap(i, j int) {
+	al[i], al[j] = al[j], al[i]
+}
+
+func (al AddrList) Less(i, j int) bool {
+	a := al[i]
+	b := al[j]
+
+	// dial localhost addresses next, they should fail immediately
+	lba := manet.IsIPLoopback(a)
+	lbb := manet.IsIPLoopback(b)
+	if lba {
+		if !lbb {
+			return true
+		}
+	}
+
+	// dial utp and similar 'non-fd-consuming' addresses first
+	fda := isFDCostlyTransport(a)
+	fdb := isFDCostlyTransport(b)
+	if !fda {
+		if fdb {
+			return true
+		}
+
+		// if neither consume fd's, assume equal ordering
+		return false
+	}
+
+	// if 'b' doesnt take a file descriptor
+	if !fdb {
+		return false
+	}
+
+	// if 'b' is loopback and both take file descriptors
+	if lbb {
+		return false
+	}
+
+	// for the rest, just sort by bytes
+	return bytes.Compare(a.Bytes(), b.Bytes()) > 0
+}
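For the four addresses in TestAddressSorting, these rules yield the order the test asserts:

	/ip4/127.0.0.1/udp/1234/utp      loopback, non-fd  -> first
	/ip4/152.12.23.53/udp/1234/utp   non-fd            -> second
	/ip4/127.0.0.1/tcp/1234          loopback, fd      -> third
	/ip4/6.5.4.3/tcp/1234            fd                -> last

One caveat worth flagging: the loopback check runs before the fd check, so for mixed pairs (loopback tcp vs non-loopback utp) Less(a, b) and Less(b, a) can both return true. The comparator is therefore not a strict weak ordering in general, even though sort.Sort produces the expected result for the inputs the test exercises.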
@@ -3,68 +3,81 @@ package swarm

import (
	"fmt"

+	mconn "github.com/ipfs/go-libp2p/p2p/metrics/conn"
	inet "github.com/ipfs/go-libp2p/p2p/net"
	conn "github.com/ipfs/go-libp2p/p2p/net/conn"
-	addrutil "github.com/ipfs/go-libp2p/p2p/net/swarm/addr"
-	lgbl "github.com/ipfs/go-libp2p/util/eventlog/loggables"
-	mconn "github.com/ipfs/go-libp2p/util/metrics/conn"
+	transport "github.com/ipfs/go-libp2p/p2p/net/transport"
+	lgbl "util/eventlog/loggables"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
-	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
-
-	multierr "github.com/ipfs/go-ipfs/thirdparty/multierr"
+	ma "github.com/jbenet/go-multiaddr"
+	ps "github.com/jbenet/go-peerstream"
+	context "golang.org/x/net/context"
)

-// Open listeners for each network the swarm should listen on
-func (s *Swarm) listen(addrs []ma.Multiaddr) error {
-
-	for _, addr := range addrs {
-		if !addrutil.AddrUsable(addr, true) {
-			return fmt.Errorf("cannot use addr: %s", addr)
-		}
-	}
-
-	retErr := multierr.New()
-
-	// listen on every address
-	for i, addr := range addrs {
-		err := s.setupListener(addr)
-		if err != nil {
-			if retErr.Errors == nil {
-				retErr.Errors = make([]error, len(addrs))
-			}
-			retErr.Errors[i] = err
-			log.Debugf("Failed to listen on: %s - %s", addr, err)
-		}
-	}
-
-	if retErr.Errors != nil {
-		return retErr
-	}
-	return nil
-}
-
-// Listen for new connections on the given multiaddr
-func (s *Swarm) setupListener(maddr ma.Multiaddr) error {
-
-	// TODO rethink how this has to work. (jbenet)
-	//
-	// resolved, err := resolveUnspecifiedAddresses([]ma.Multiaddr{maddr})
-	// if err != nil {
-	// 	return err
-	// }
-	// for _, a := range resolved {
-	// 	s.peers.AddAddr(s.local, a)
-	// }
+// Open listeners and reuse-dialers for the given addresses
+func (s *Swarm) setupInterfaces(addrs []ma.Multiaddr) error {
+	errs := make([]error, len(addrs))
+	var succeeded int
+	for i, a := range addrs {
+		tpt := s.transportForAddr(a)
+		if tpt == nil {
+			errs[i] = fmt.Errorf("no transport for address: %s", a)
+			continue
+		}
+
+		d, err := tpt.Dialer(a, transport.TimeoutOpt(DialTimeout), transport.ReusePorts)
+		if err != nil {
+			errs[i] = err
+			continue
+		}
+
+		s.dialer.AddDialer(d)
+
+		list, err := tpt.Listen(a)
+		if err != nil {
+			errs[i] = err
+			continue
+		}
+
+		err = s.addListener(list)
+		if err != nil {
+			errs[i] = err
+			continue
+		}
+		succeeded++
+	}
+
+	for i, e := range errs {
+		if e != nil {
+			log.Warningf("listen on %s failed: %s", addrs[i], errs[i])
+		}
+	}
+	if succeeded == 0 && len(addrs) > 0 {
+		return fmt.Errorf("failed to listen on any addresses: %s", errs)
+	}
+
+	return nil
+}
+
+func (s *Swarm) transportForAddr(a ma.Multiaddr) transport.Transport {
+	for _, t := range s.transports {
+		if t.Matches(a) {
+			return t
+		}
+	}
+
+	return nil
+}
+
+func (s *Swarm) addListener(tptlist transport.Listener) error {

	sk := s.peers.PrivKey(s.local)
	if sk == nil {
		// may be fine for sk to be nil, just log a warning.
		log.Warning("Listener not given PrivateKey, so WILL NOT SECURE conns.")
	}
-	log.Debugf("Swarm Listening at %s", maddr)
-	list, err := conn.Listen(s.Context(), maddr, s.local, sk)
+
+	list, err := conn.WrapTransportListener(s.Context(), tptlist, s.local, sk)
	if err != nil {
		return err
	}
@@ -72,11 +85,15 @@ func (s *Swarm) addListener(tptlist transport.Listener) error {
	list.SetAddrFilters(s.Filters)

	if cw, ok := list.(conn.ListenerConnWrapper); ok {
-		cw.SetConnWrapper(func(c manet.Conn) manet.Conn {
+		cw.SetConnWrapper(func(c transport.Conn) transport.Conn {
			return mconn.WrapConn(s.bwc, c)
		})
	}

+	return s.addConnListener(list)
+}
+
+func (s *Swarm) addConnListener(list conn.Listener) error {
	// AddListener to the peerstream Listener. this will begin accepting connections
	// and streams!
	sl, err := s.swarm.AddListener(list)
@@ -85,6 +102,8 @@ func (s *Swarm) addConnListener(list conn.Listener) error {
	}
	log.Debugf("Swarm Listeners at %s", s.ListenAddresses())

+	maddr := list.Multiaddr()
+
	// signal to our notifiees on successful conn.
	s.notifyAll(func(n inet.Notifiee) {
		n.Listen((*Network)(s), maddr)
@@ -107,7 +126,7 @@ func (s *Swarm) addConnListener(list conn.Listener) error {
			if !more {
				return
			}
-			log.Warningf("swarm listener accept error: %s", err)
+			log.Errorf("swarm listener accept error: %s", err)
		case <-ctx.Done():
			return
		}
@@ -138,5 +157,8 @@ func (s *Swarm) connHandler(c *ps.Conn) *Conn {
		return nil
	}

+	// if a peer dials us, remove from dial backoff.
+	s.backf.Clear(sc.RemotePeer())
+
	return sc
}
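Unlike the old listen, setupInterfaces treats per-address failures as soft: it records one error slot per address, logs the failures, and only returns an error when no address could be set up at all. So a caller can pass a mixed batch; a hedged illustration using the mkAddr test helper from dial_test.go (the udp/utp address has no registered transport in this commit, so it would only be logged):

	addrs := []ma.Multiaddr{
		mkAddr(t, "/ip4/127.0.0.1/tcp/0"),     // TCP transport matches: listener starts
		mkAddr(t, "/ip4/127.0.0.1/udp/0/utp"), // no transport yet: error recorded and logged
	}
	// NewSwarm(ctx, addrs, ...) still succeeds as long as one address works.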
@@ -5,12 +5,12 @@ import (
	peer "github.com/ipfs/go-libp2p/p2p/peer"

+	metrics "github.com/ipfs/go-libp2p/p2p/metrics"
	inet "github.com/ipfs/go-libp2p/p2p/net"
-	metrics "github.com/ipfs/go-libp2p/util/metrics"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	"github.com/jbenet/goprocess"
+	context "golang.org/x/net/context"
)

// Network implements the inet.Network interface.
...
@@ -5,7 +5,7 @@ import (
	"testing"
	"time"

-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	context "golang.org/x/net/context"
	inet "github.com/ipfs/go-libp2p/p2p/net"
	testutil "github.com/ipfs/go-libp2p/p2p/test/util"
)
...
@@ -4,8 +4,8 @@ import (
	"testing"
	"time"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
	inet "github.com/ipfs/go-libp2p/p2p/net"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
...
@@ -3,7 +3,7 @@ package swarm

import (
	inet "github.com/ipfs/go-libp2p/p2p/net"

-	ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream"
+	ps "github.com/jbenet/go-peerstream"
)

// a Stream is a wrapper around a ps.Stream that exposes a way to get
...
@@ -9,13 +9,13 @@ import (
	"testing"
	"time"

-	testutil "github.com/ipfs/go-ipfs/util/testutil"
+	metrics "github.com/ipfs/go-libp2p/p2p/metrics"
	inet "github.com/ipfs/go-libp2p/p2p/net"
	peer "github.com/ipfs/go-libp2p/p2p/peer"
-	metrics "github.com/ipfs/go-libp2p/util/metrics"
+	testutil "util/testutil"

-	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
-	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+	ma "github.com/jbenet/go-multiaddr"
+	context "golang.org/x/net/context"
)

func EchoStreamHandler(stream inet.Stream) {
@@ -237,6 +237,15 @@ func TestSwarm(t *testing.T) {
	SubtestSwarm(t, swarms, msgs)
}

+func TestBasicSwarm(t *testing.T) {
+	// t.Skip("skipping for another test")
+	t.Parallel()
+
+	msgs := 1
+	swarms := 2
+	SubtestSwarm(t, swarms, msgs)
+}
+
func TestConnHandler(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()
...
-package conn
+package transport

import (
+	"net"
	"os"
	"strings"
+	"syscall"

-	reuseport "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-reuseport"
+	reuseport "github.com/jbenet/go-reuseport"
)

// envReuseport is the env variable name used to turn off reuse port.
@@ -30,6 +32,34 @@ func init() {
//
// If this becomes a sought after feature, we could add this to the config.
// In the end, reuseport is a stop-gap.
-func reuseportIsAvailable() bool {
+func ReuseportIsAvailable() bool {
	return envReuseportVal && reuseport.Available()
}
+
+// ReuseErrShouldRetry diagnoses whether to retry after a reuse error.
+// if we failed to bind, we should retry. if bind worked and this is a
+// real dial error (remote end didnt answer) then we should not retry.
+func ReuseErrShouldRetry(err error) bool {
+	if err == nil {
+		return false // hey, it worked! no need to retry.
+	}
+
+	// if it's a network timeout error, it's a legitimate failure.
+	if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+		return false
+	}
+
+	errno, ok := err.(syscall.Errno)
+	if !ok { // not an errno? who knows what this is. retry.
+		return true
+	}
+
+	switch errno {
+	case syscall.EADDRINUSE, syscall.EADDRNOTAVAIL:
+		return true // failure to bind. retry.
+	case syscall.ECONNREFUSED:
+		return false // real dial error
+	default:
+		return true // optimistically default to retry.
+	}
+}
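Restating the decision table ReuseErrShouldRetry encodes, since reuseDial in the tcp transport below leans on it to decide whether to fall back to a plain dial:

	err == nil                     -> false (the dial worked)
	net.Error with Timeout() true  -> false (legitimate remote failure)
	EADDRINUSE / EADDRNOTAVAIL     -> true  (we failed to bind; retry without reuseport)
	ECONNREFUSED                   -> false (remote end answered with a refusal)
	any other error                -> true  (unknown; retry optimistically)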
package transport

import (
	"fmt"
	"net"
	"sync"
	"time"

	ma "github.com/jbenet/go-multiaddr"
	manet "github.com/jbenet/go-multiaddr-net"
	reuseport "github.com/jbenet/go-reuseport"
	context "golang.org/x/net/context"

	lgbl "util/eventlog/loggables"
)

type TcpTransport struct {
	dlock   sync.Mutex
	dialers map[string]Dialer

	llock     sync.Mutex
	listeners map[string]Listener
}

func NewTCPTransport() *TcpTransport {
	return &TcpTransport{
		dialers:   make(map[string]Dialer),
		listeners: make(map[string]Listener),
	}
}

func (t *TcpTransport) Dialer(laddr ma.Multiaddr, opts ...DialOpt) (Dialer, error) {
	t.dlock.Lock()
	defer t.dlock.Unlock()
	s := laddr.String()
	d, found := t.dialers[s]
	if found {
		return d, nil
	}

	var base manet.Dialer

	var doReuse bool
	for _, o := range opts {
		switch o := o.(type) {
		case TimeoutOpt:
			base.Timeout = time.Duration(o)
		case ReuseportOpt:
			doReuse = bool(o)
		default:
			return nil, fmt.Errorf("unrecognized option: %#v", o)
		}
	}

	tcpd, err := t.newTcpDialer(base, laddr, doReuse)
	if err != nil {
		return nil, err
	}

	t.dialers[s] = tcpd
	return tcpd, nil
}

func (t *TcpTransport) Listen(laddr ma.Multiaddr) (Listener, error) {
	t.llock.Lock()
	defer t.llock.Unlock()
	s := laddr.String()
	l, found := t.listeners[s]
	if found {
		return l, nil
	}

	list, err := manetListen(laddr)
	if err != nil {
		return nil, err
	}

	tlist := &tcpListener{
		list:      list,
		transport: t,
	}

	t.listeners[s] = tlist
	return tlist, nil
}

func manetListen(addr ma.Multiaddr) (manet.Listener, error) {
	network, naddr, err := manet.DialArgs(addr)
	if err != nil {
		return nil, err
	}

	if ReuseportIsAvailable() {
		nl, err := reuseport.Listen(network, naddr)
		if err == nil {
			// hey, it worked!
			return manet.WrapNetListener(nl)
		}
		// reuseport is available, but we failed to listen. log debug, and retry normally.
		log.Debugf("reuseport available, but failed to listen: %s %s, %s", network, naddr, err)
	}

	// either reuseport not available, or it failed. try normally.
	return manet.Listen(addr)
}

func (t *TcpTransport) Matches(a ma.Multiaddr) bool {
	return IsTcpMultiaddr(a)
}

type tcpDialer struct {
	laddr ma.Multiaddr

	doReuse bool

	rd       reuseport.Dialer
	madialer manet.Dialer

	transport Transport
}

func (t *TcpTransport) newTcpDialer(base manet.Dialer, laddr ma.Multiaddr, doReuse bool) (*tcpDialer, error) {
	// get the local net.Addr manually
	la, err := manet.ToNetAddr(laddr)
	if err != nil {
		return nil, err // something wrong with laddr.
	}

	if doReuse && ReuseportIsAvailable() {
		rd := reuseport.Dialer{
			D: net.Dialer{
				LocalAddr: la,
				Timeout:   base.Timeout,
			},
		}

		return &tcpDialer{
			doReuse:   true,
			laddr:     laddr,
			rd:        rd,
			madialer:  base,
			transport: t,
		}, nil
	}

	return &tcpDialer{
		doReuse:   false,
		laddr:     laddr,
		madialer:  base,
		transport: t,
	}, nil
}

func (d *tcpDialer) Dial(raddr ma.Multiaddr) (Conn, error) {
	var c manet.Conn
	var err error
	if d.doReuse {
		c, err = d.reuseDial(raddr)
	} else {
		c, err = d.madialer.Dial(raddr)
	}

	if err != nil {
		return nil, err
	}

	return &connWrap{
		Conn:      c,
		transport: d.transport,
	}, nil
}

func (d *tcpDialer) reuseDial(raddr ma.Multiaddr) (manet.Conn, error) {
	logdial := lgbl.Dial("conn", "", "", d.laddr, raddr)
	rpev := log.EventBegin(context.TODO(), "tptDialReusePort", logdial)

	network, netraddr, err := manet.DialArgs(raddr)
	if err != nil {
		return nil, err
	}

	con, err := d.rd.Dial(network, netraddr)
	if err == nil {
		logdial["reuseport"] = "success"
		rpev.Done()
		return manet.WrapNetConn(con)
	}

	if !ReuseErrShouldRetry(err) {
		logdial["reuseport"] = "failure"
		logdial["error"] = err
		rpev.Done()
		return nil, err
	}

	logdial["reuseport"] = "retry"
	logdial["error"] = err
	rpev.Done()

	return d.madialer.Dial(raddr)
}

func (d *tcpDialer) Matches(a ma.Multiaddr) bool {
	return IsTcpMultiaddr(a)
}

type tcpListener struct {
	list      manet.Listener
	transport Transport
}

func (d *tcpListener) Accept() (Conn, error) {
	c, err := d.list.Accept()
	if err != nil {
		return nil, err
	}

	return &connWrap{
		Conn:      c,
		transport: d.transport,
	}, nil
}

func (d *tcpListener) Addr() net.Addr {
	return d.list.Addr()
}

func (t *tcpListener) Multiaddr() ma.Multiaddr {
	return t.list.Multiaddr()
}

func (t *tcpListener) NetListener() net.Listener {
	return t.list.NetListener()
}

func (d *tcpListener) Close() error {
	return d.list.Close()
}
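End to end, a dial through the new transport layer looks roughly like this (a sketch from inside the transport package; addresses are illustrative and error handling is elided). Note that Dialer and Listen both memoize by the local multiaddr string, so repeated calls share one reuseport socket:

	tpt := NewTCPTransport()
	laddr, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
	raddr, _ := ma.NewMultiaddr("/ip4/203.0.113.7/tcp/4001")

	d, _ := tpt.Dialer(laddr, TimeoutOpt(10*time.Second), ReusePorts)
	c, _ := d.Dial(raddr) // a transport.Conn; c.Transport() == tpt
	defer c.Close()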
package transport

import (
	"net"
	"time"

	logging "QmWRypnfEwrgH4k93KEHN5hng7VjKYkWmzDYRuTZeh2Mgh/go-log"
	ma "github.com/jbenet/go-multiaddr"
	manet "github.com/jbenet/go-multiaddr-net"
)

var log = logging.Logger("transport")

type Conn interface {
	manet.Conn

	Transport() Transport
}

type Transport interface {
	Dialer(laddr ma.Multiaddr, opts ...DialOpt) (Dialer, error)
	Listen(laddr ma.Multiaddr) (Listener, error)
	Matches(ma.Multiaddr) bool
}

type Dialer interface {
	Dial(raddr ma.Multiaddr) (Conn, error)
	Matches(ma.Multiaddr) bool
}

type Listener interface {
	Accept() (Conn, error)
	Close() error
	Addr() net.Addr
	Multiaddr() ma.Multiaddr
}

type connWrap struct {
	manet.Conn
	transport Transport
}

func (cw *connWrap) Transport() Transport {
	return cw.transport
}

type DialOpt interface{}
type TimeoutOpt time.Duration
type ReuseportOpt bool

var ReusePorts ReuseportOpt = true

func IsTcpMultiaddr(a ma.Multiaddr) bool {
	p := a.Protocols()
	return len(p) == 2 && (p[0].Name == "ip4" || p[0].Name == "ip6") && p[1].Name == "tcp"
}

func IsUtpMultiaddr(a ma.Multiaddr) bool {
	p := a.Protocols()
	return len(p) == 3 && p[2].Name == "utp"
}
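These interfaces are what keep the swarm transport-agnostic: transportForAddr in swarm_listen.go simply scans s.transports for the first Matches hit. A second transport only has to satisfy the same three methods; a hypothetical skeleton, not part of this commit, with names mine:

	type UtpTransport struct{}

	func (t *UtpTransport) Dialer(laddr ma.Multiaddr, opts ...DialOpt) (Dialer, error) {
		return nil, fmt.Errorf("utp dialer not implemented") // hypothetical stub
	}

	func (t *UtpTransport) Listen(laddr ma.Multiaddr) (Listener, error) {
		return nil, fmt.Errorf("utp listener not implemented") // hypothetical stub
	}

	func (t *UtpTransport) Matches(a ma.Multiaddr) bool {
		return IsUtpMultiaddr(a) // reuses the helper defined above
	}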