Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
adam.huang
go-libp2p
Commits
37bff87b
Commit
37bff87b
authored
Apr 12, 2016
by
Jeromy Johnson
Browse files
Merge pull request #37 from ipfs/deps/cleanup
Deps/cleanup
parents
bd4f4b5b
d9d52ad8
Changes
85
Hide whitespace changes
Inline
Side-by-side
p2p/peer/addr/addrsrcs.go
deleted
100644 → 0
View file @
bd4f4b5b
// Package addr provides utility functions to handle peer addresses.
package
addr
import
(
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)
// AddrSource is a source of addresses. It allows clients to retrieve
// a set of addresses at a last possible moment in time. It is used
// to query a set of addresses that may change over time, as a result
// of the network changing interfaces or mappings.
type
Source
interface
{
Addrs
()
[]
ma
.
Multiaddr
}
// CombineSources returns a new AddrSource which is the
// concatenation of all input AddrSources:
//
// combined := CombinedSources(a, b)
// combined.Addrs() // append(a.Addrs(), b.Addrs()...)
//
func
CombineSources
(
srcs
...
Source
)
Source
{
return
combinedAS
(
srcs
)
}
type
combinedAS
[]
Source
func
(
cas
combinedAS
)
Addrs
()
[]
ma
.
Multiaddr
{
var
addrs
[]
ma
.
Multiaddr
for
_
,
s
:=
range
cas
{
addrs
=
append
(
addrs
,
s
.
Addrs
()
...
)
}
return
addrs
}
// UniqueSource returns a new AddrSource which omits duplicate
// addresses from the inputs:
//
// unique := UniqueSource(a, b)
// unique.Addrs() // append(a.Addrs(), b.Addrs()...)
// // but only adds each addr once.
//
func
UniqueSource
(
srcs
...
Source
)
Source
{
return
uniqueAS
(
srcs
)
}
type
uniqueAS
[]
Source
func
(
uas
uniqueAS
)
Addrs
()
[]
ma
.
Multiaddr
{
seen
:=
make
(
map
[
string
]
struct
{})
var
addrs
[]
ma
.
Multiaddr
for
_
,
s
:=
range
uas
{
for
_
,
a
:=
range
s
.
Addrs
()
{
s
:=
a
.
String
()
if
_
,
found
:=
seen
[
s
];
!
found
{
addrs
=
append
(
addrs
,
a
)
seen
[
s
]
=
struct
{}{}
}
}
}
return
addrs
}
// Slice is a simple slice of addresses that implements
// the AddrSource interface.
type
Slice
[]
ma
.
Multiaddr
func
(
as
Slice
)
Addrs
()
[]
ma
.
Multiaddr
{
return
as
}
p2p/peer/addr/addrsrcs_test.go
deleted
100644 → 0
View file @
bd4f4b5b
package addr

import (
	"fmt"
	"testing"

	ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)

// newAddrOrFatal parses s into a multiaddr, failing the test on error.
func newAddrOrFatal(t *testing.T, s string) ma.Multiaddr {
	a, err := ma.NewMultiaddr(s)
	if err != nil {
		t.Fatal("error parsing multiaddr", err)
	}
	return a
}

// newAddrs builds n distinct tcp multiaddrs to use as fixtures.
func newAddrs(t *testing.T, n int) []ma.Multiaddr {
	addrs := make([]ma.Multiaddr, n)
	for i := 0; i < n; i++ {
		s := fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)
		addrs[i] = newAddrOrFatal(t, s)
	}
	return addrs
}

// addrSetsSame reports whether a and b contain equal addresses in
// the same order.
func addrSetsSame(a, b []ma.Multiaddr) bool {
	if len(a) != len(b) {
		return false
	}
	for i, aa := range a {
		bb := b[i]
		if !aa.Equal(bb) {
			return false
		}
	}
	return true
}

// addrSourcesSame reports whether two sources currently yield the
// same ordered address set.
func addrSourcesSame(a, b Source) bool {
	return addrSetsSame(a.Addrs(), b.Addrs())
}

func TestAddrCombine(t *testing.T) {
	addrs := newAddrs(t, 30)
	a := Slice(addrs[0:10])
	b := Slice(addrs[10:20])
	c := Slice(addrs[20:30])
	d := CombineSources(a, b, c)
	if !addrSetsSame(addrs, d.Addrs()) {
		t.Error("addrs differ")
	}
	if !addrSourcesSame(Slice(addrs), d) {
		t.Error("addrs differ")
	}
}

func TestAddrUnique(t *testing.T) {
	addrs := newAddrs(t, 40)
	a := Slice(addrs[0:20])
	b := Slice(addrs[10:30])
	c := Slice(addrs[20:40])
	d := CombineSources(a, b, c)
	e := UniqueSource(a, b, c)
	if addrSetsSame(addrs, d.Addrs()) {
		t.Error("addrs same")
	}
	if addrSourcesSame(Slice(addrs), d) {
		t.Error("addrs same")
	}
	if !addrSetsSame(addrs, e.Addrs()) {
		t.Error("addrs differ", addrs, "\n\n", e.Addrs(), "\n\n")
	}
	if !addrSourcesSame(Slice(addrs), e) {
		t.Error("addrs differ", addrs, "\n\n", e.Addrs(), "\n\n")
	}
}
p2p/peer/addr_manager.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer

import (
	"sync"
	"time"

	ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)

const (
	// TempAddrTTL is the ttl used for a short lived address
	TempAddrTTL = time.Second * 10

	// ProviderAddrTTL is the TTL of an address we've received from a provider.
	// This is also a temporary address, but lasts longer. After this expires,
	// the records we return will require an extra lookup.
	ProviderAddrTTL = time.Minute * 10

	// RecentlyConnectedAddrTTL is used when we recently connected to a peer.
	// It means that we are reasonably certain of the peer's address.
	RecentlyConnectedAddrTTL = time.Minute * 10

	// OwnObservedAddrTTL is used for our own external addresses observed by peers.
	OwnObservedAddrTTL = time.Minute * 10

	// PermanentAddrTTL is the ttl for a "permanent address" (e.g. bootstrap nodes)
	// if we haven't shipped you an update to ipfs in 356 days
	// we probably arent running the same bootstrap nodes...
	PermanentAddrTTL = time.Hour * 24 * 356

	// ConnectedAddrTTL is the ttl used for the addresses of a peer to whom
	// we're connected directly. This is basically permanent, as we will
	// clear them + re-add under a TempAddrTTL after disconnecting.
	ConnectedAddrTTL = PermanentAddrTTL
)

// expiringAddr pairs an address with its absolute expiry instant.
type expiringAddr struct {
	Addr ma.Multiaddr
	TTL  time.Time // expiry instant, despite the name
}

// ExpiredBy reports whether the address is expired as of time t.
func (e *expiringAddr) ExpiredBy(t time.Time) bool {
	return t.After(e.TTL)
}

// addrSet maps an address's string form to its expiring record.
type addrSet map[string]expiringAddr

// AddrManager manages addresses.
// The zero-value is ready to be used.
type AddrManager struct {
	addrmu sync.Mutex // guards addrs
	addrs  map[ID]addrSet
}

// ensures the AddrManager is initialized.
// So we can use the zero value.
func (mgr *AddrManager) init() {
	if mgr.addrs == nil {
		mgr.addrs = make(map[ID]addrSet)
	}
}

// Peers returns the IDs of all peers with stored addresses.
// Callers must hold no locks; this takes addrmu itself.
func (mgr *AddrManager) Peers() []ID {
	mgr.addrmu.Lock()
	defer mgr.addrmu.Unlock()
	if mgr.addrs == nil {
		return nil
	}

	pids := make([]ID, 0, len(mgr.addrs))
	for pid := range mgr.addrs {
		pids = append(pids, pid)
	}
	return pids
}

// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
func (mgr *AddrManager) AddAddr(p ID, addr ma.Multiaddr, ttl time.Duration) {
	mgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs gives AddrManager addresses to use, with a given ttl
// (time-to-live), after which the address is no longer valid.
// If the manager has a longer TTL, the operation is a no-op for that address
func (mgr *AddrManager) AddAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mgr.addrmu.Lock()
	defer mgr.addrmu.Unlock()

	// if ttl is zero, exit. nothing to do.
	if ttl <= 0 {
		return
	}

	// so zero value can be used
	mgr.init()

	amap, found := mgr.addrs[p]
	if !found {
		amap = make(addrSet)
		mgr.addrs[p] = amap
	}

	// only expand ttls
	exp := time.Now().Add(ttl)
	for _, addr := range addrs {
		addrstr := addr.String()
		a, found := amap[addrstr]
		if !found || exp.After(a.TTL) {
			amap[addrstr] = expiringAddr{Addr: addr, TTL: exp}
		}
	}
}

// SetAddr calls mgr.SetAddrs(p, addr, ttl)
func (mgr *AddrManager) SetAddr(p ID, addr ma.Multiaddr, ttl time.Duration) {
	mgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs sets the ttl on addresses. This clears any TTL there previously.
// This is used when we receive the best estimate of the validity of an address.
// A non-positive ttl deletes the address instead.
func (mgr *AddrManager) SetAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mgr.addrmu.Lock()
	defer mgr.addrmu.Unlock()

	// so zero value can be used
	mgr.init()

	amap, found := mgr.addrs[p]
	if !found {
		amap = make(addrSet)
		mgr.addrs[p] = amap
	}

	exp := time.Now().Add(ttl)
	for _, addr := range addrs {
		// re-set all of them for new ttl.
		// NOTE: the original shadowed the addrs slice parameter with the
		// address's string form; renamed to addrstr (as in AddAddrs).
		addrstr := addr.String()
		if ttl > 0 {
			amap[addrstr] = expiringAddr{Addr: addr, TTL: exp}
		} else {
			delete(amap, addrstr)
		}
	}
}

// Addrs returns all known (and still valid) addresses for a given peer,
// pruning expired entries as a side effect.
func (mgr *AddrManager) Addrs(p ID) []ma.Multiaddr {
	mgr.addrmu.Lock()
	defer mgr.addrmu.Unlock()

	// not initialized? nothing to give.
	if mgr.addrs == nil {
		return nil
	}

	maddrs, found := mgr.addrs[p]
	if !found {
		return nil
	}

	now := time.Now()
	good := make([]ma.Multiaddr, 0, len(maddrs))
	var expired []string
	for s, m := range maddrs {
		if m.ExpiredBy(now) {
			expired = append(expired, s)
		} else {
			good = append(good, m.Addr)
		}
	}

	// clean up the expired ones.
	for _, s := range expired {
		delete(maddrs, s)
	}
	return good
}

// ClearAddrs removes all previously stored addresses for peer p.
func (mgr *AddrManager) ClearAddrs(p ID) {
	mgr.addrmu.Lock()
	defer mgr.addrmu.Unlock()
	mgr.init()

	mgr.addrs[p] = make(addrSet) // clear what was there before
}
p2p/peer/addr_manager_test.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer

import (
	"testing"
	"time"

	ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)

// IDS decodes a b58-encoded peer ID, failing the test if invalid.
func IDS(t *testing.T, ids string) ID {
	id, err := IDB58Decode(ids)
	if err != nil {
		t.Fatalf("id %q is bad: %s", ids, err)
	}
	return id
}

// MA parses a multiaddr string, failing the test if invalid.
func MA(t *testing.T, m string) ma.Multiaddr {
	maddr, err := ma.NewMultiaddr(m)
	if err != nil {
		t.Fatal(err)
	}
	return maddr
}

// testHas asserts that act contains exactly the addresses in exp,
// independent of order.
func testHas(t *testing.T, exp, act []ma.Multiaddr) {
	if len(exp) != len(act) {
		t.Fatal("lengths not the same")
	}

	for _, a := range exp {
		found := false
		for _, b := range act {
			if a.Equal(b) {
				found = true
				break
			}
		}
		if !found {
			// BUG FIX: original used t.Fatal with a format string,
			// which printed "%s" literally. Fatalf formats correctly.
			t.Fatalf("expected address %s not found", a)
		}
	}
}

func TestAddresses(t *testing.T) {
	id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
	id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
	id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
	id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
	id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")

	ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
	ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
	ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
	ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
	ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
	ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
	ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
	ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
	ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
	ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
	ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
	ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
	ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
	ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
	ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")

	ttl := time.Hour
	m := AddrManager{}
	m.AddAddr(id1, ma11, ttl)

	m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl)
	m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl) // idempotency

	m.AddAddr(id3, ma31, ttl)
	m.AddAddr(id3, ma32, ttl)
	m.AddAddr(id3, ma33, ttl)
	m.AddAddr(id3, ma33, ttl) // idempotency
	m.AddAddr(id3, ma33, ttl)

	m.AddAddrs(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // multiple

	m.AddAddrs(id5, []ma.Multiaddr{ma21, ma22}, ttl)             // clearing
	m.AddAddrs(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // clearing
	m.ClearAddrs(id5)
	m.AddAddrs(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ttl) // clearing

	// test the Addresses return value
	testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
	testHas(t, []ma.Multiaddr{ma31, ma32, ma33}, m.Addrs(id3))
	testHas(t, []ma.Multiaddr{ma41, ma42, ma43, ma44}, m.Addrs(id4))
	testHas(t, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, m.Addrs(id5))
}

func TestAddressesExpire(t *testing.T) {
	id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
	id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
	ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
	ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
	ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
	ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
	ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")

	m := AddrManager{}
	m.AddAddr(id1, ma11, time.Hour)
	m.AddAddr(id1, ma12, time.Hour)
	m.AddAddr(id1, ma13, time.Hour)
	m.AddAddr(id2, ma24, time.Hour)
	m.AddAddr(id2, ma25, time.Hour)

	testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

	m.SetAddr(id1, ma11, 2*time.Hour)
	m.SetAddr(id1, ma12, 2*time.Hour)
	m.SetAddr(id1, ma13, 2*time.Hour)
	m.SetAddr(id2, ma24, 2*time.Hour)
	m.SetAddr(id2, ma25, 2*time.Hour)

	testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

	m.SetAddr(id1, ma11, time.Millisecond)
	<-time.After(time.Millisecond)
	testHas(t, []ma.Multiaddr{ma12, ma13}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

	m.SetAddr(id1, ma13, time.Millisecond)
	<-time.After(time.Millisecond)
	testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

	m.SetAddr(id2, ma24, time.Millisecond)
	<-time.After(time.Millisecond)
	testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma25}, m.Addrs(id2))

	m.SetAddr(id2, ma25, time.Millisecond)
	<-time.After(time.Millisecond)
	testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
	testHas(t, nil, m.Addrs(id2))

	m.SetAddr(id1, ma12, time.Millisecond)
	<-time.After(time.Millisecond)
	testHas(t, nil, m.Addrs(id1))
	testHas(t, nil, m.Addrs(id2))
}

func TestClearWorks(t *testing.T) {
	id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
	id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
	ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
	ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
	ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
	ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
	ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")

	m := AddrManager{}
	m.AddAddr(id1, ma11, time.Hour)
	m.AddAddr(id1, ma12, time.Hour)
	m.AddAddr(id1, ma13, time.Hour)
	m.AddAddr(id2, ma24, time.Hour)
	m.AddAddr(id2, ma25, time.Hour)

	testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
	testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

	m.ClearAddrs(id1)
	m.ClearAddrs(id2)

	testHas(t, nil, m.Addrs(id1))
	testHas(t, nil, m.Addrs(id2))
}
p2p/peer/metrics.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer

import (
	"sync"
	"time"
)

// LatencyEWMASmoothing governs the decay of the EWMA (the speed
// at which it changes). This must be a normalized (0-1) value.
// 1 is 100% change, 0 is no change.
// (Fixed doc typo: previously said "LatencyEWMASmooting".)
var LatencyEWMASmoothing = 0.1

// Metrics is just an object that tracks metrics
// across a set of peers.
type Metrics interface {
	// RecordLatency records a new latency measurement
	RecordLatency(ID, time.Duration)

	// LatencyEWMA returns an exponentially-weighted moving avg.
	// of all measurements of a peer's latency.
	LatencyEWMA(ID) time.Duration
}

// metrics is the default Metrics implementation: a mutex-guarded
// map of latency EWMAs keyed by peer ID.
type metrics struct {
	latmap map[ID]time.Duration
	latmu  sync.RWMutex
}

// NewMetrics returns a ready-to-use Metrics implementation.
func NewMetrics() Metrics {
	return &metrics{
		latmap: make(map[ID]time.Duration),
	}
}

// RecordLatency records a new latency measurement
func (m *metrics) RecordLatency(p ID, next time.Duration) {
	nextf := float64(next)
	s := LatencyEWMASmoothing
	if s > 1 || s < 0 {
		s = 0.1 // ignore the knob. it's broken. look, it jiggles.
	}

	m.latmu.Lock()
	ewma, found := m.latmap[p]
	ewmaf := float64(ewma)
	if !found {
		m.latmap[p] = next // when no data, just take it as the mean.
	} else {
		nextf = ((1.0 - s) * ewmaf) + (s * nextf)
		m.latmap[p] = time.Duration(nextf)
	}
	m.latmu.Unlock()
}

// LatencyEWMA returns an exponentially-weighted moving avg.
// of all measurements of a peer's latency.
func (m *metrics) LatencyEWMA(p ID) time.Duration {
	m.latmu.RLock()
	lat := m.latmap[p]
	m.latmu.RUnlock()
	return time.Duration(lat)
}
p2p/peer/metrics_test.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer_test

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	peer "github.com/ipfs/go-libp2p/p2p/peer"
	testutil "github.com/ipfs/go-libp2p/testutil"
)

// TestLatencyEWMAFun feeds a random walk of latencies into the
// metrics EWMA and prints the running average. It never terminates
// and is skipped by default; remove the Skip to watch it.
func TestLatencyEWMAFun(t *testing.T) {
	t.Skip("run it for fun")

	m := peer.NewMetrics()
	id, err := testutil.RandPeerID()
	if err != nil {
		t.Fatal(err)
	}

	mu := 100.0
	sig := 10.0
	next := func() time.Duration {
		mu = (rand.NormFloat64() * sig) + mu
		return time.Duration(mu)
	}

	print := func() {
		fmt.Printf("%3.f %3.f --> %d\n", sig, mu, m.LatencyEWMA(id))
	}

	for {
		select {
		case <-time.After(200 * time.Millisecond):
			m.RecordLatency(id, next())
			print()
		}
	}
}
p2p/peer/peer.go
deleted
100644 → 0
View file @
bd4f4b5b
// package peer implements an object used to represent peers in the ipfs network.
package peer

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strings"

	ic "github.com/ipfs/go-libp2p/p2p/crypto"

	b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58"
	mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
	u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
	logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
	ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)

var log = logging.Logger("peer")

// ID represents the identity of a peer.
// (This doc comment was previously stranded inside the import block.)
type ID string

// Pretty returns a b58-encoded string of the ID
func (id ID) Pretty() string {
	return IDB58Encode(id)
}

// Loggable returns a structured-logging view of the ID.
func (id ID) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"peerID": id.Pretty(),
	}
}

// String prints out the peer.
//
// TODO(brian): ensure correctness at ID generation and
// enforce this by only exposing functions that generate
// IDs safely. Then any peer.ID type found in the
// codebase is known to be correct.
func (id ID) String() string {
	pid := id.Pretty()

	//All sha256 nodes start with Qm
	//We can skip the Qm to make the peer.ID more useful
	if strings.HasPrefix(pid, "Qm") {
		pid = pid[2:]
	}

	maxRunes := 6
	if len(pid) < maxRunes {
		maxRunes = len(pid)
	}
	return fmt.Sprintf("<peer.ID %s>", pid[:maxRunes])
}

// MatchesPrivateKey tests whether this ID was derived from sk
func (id ID) MatchesPrivateKey(sk ic.PrivKey) bool {
	return id.MatchesPublicKey(sk.GetPublic())
}

// MatchesPublicKey tests whether this ID was derived from pk
func (id ID) MatchesPublicKey(pk ic.PubKey) bool {
	oid, err := IDFromPublicKey(pk)
	if err != nil {
		return false
	}
	return oid == id
}

// IDFromString cast a string to ID type, and validate
// the id to make sure it is a multihash.
func IDFromString(s string) (ID, error) {
	if _, err := mh.Cast([]byte(s)); err != nil {
		return ID(""), err
	}
	return ID(s), nil
}

// IDFromBytes cast a string to ID type, and validate
// the id to make sure it is a multihash.
func IDFromBytes(b []byte) (ID, error) {
	if _, err := mh.Cast(b); err != nil {
		return ID(""), err
	}
	return ID(b), nil
}

// IDB58Decode returns a b58-decoded Peer
func IDB58Decode(s string) (ID, error) {
	m, err := mh.FromB58String(s)
	if err != nil {
		return "", err
	}
	return ID(m), err
}

// IDB58Encode returns b58-encoded string
func IDB58Encode(id ID) string {
	return b58.Encode([]byte(id))
}

// IDHexDecode returns a hex-decoded Peer
// (doc fix: previously claimed b58).
func IDHexDecode(s string) (ID, error) {
	m, err := mh.FromHexString(s)
	if err != nil {
		return "", err
	}
	return ID(m), err
}

// IDHexEncode returns a hex-encoded string
// (doc fix: previously claimed b58).
func IDHexEncode(id ID) string {
	return hex.EncodeToString([]byte(id))
}

// IDFromPublicKey returns the Peer ID corresponding to pk
func IDFromPublicKey(pk ic.PubKey) (ID, error) {
	b, err := pk.Bytes()
	if err != nil {
		return "", err
	}
	hash := u.Hash(b)
	return ID(hash), nil
}

// IDFromPrivateKey returns the Peer ID corresponding to sk
func IDFromPrivateKey(sk ic.PrivKey) (ID, error) {
	return IDFromPublicKey(sk.GetPublic())
}

// Set maps a Peer ID to a struct (a set of peer IDs).
type Set map[ID]struct{}

// PeerInfo is a small struct used to pass around a peer with
// a set of addresses (and later, keys?). This is not meant to be
// a complete view of the system, but rather to model updates to
// the peerstore. It is used by things like the routing system.
type PeerInfo struct {
	ID    ID
	Addrs []ma.Multiaddr
}

// Loggable returns a structured-logging view of the PeerInfo.
func (pi *PeerInfo) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"peerID": pi.ID.Pretty(),
		"addrs":  pi.Addrs,
	}
}

// MarshalJSON encodes the PeerInfo with a b58 ID and string addrs.
func (pi *PeerInfo) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	out["ID"] = IDB58Encode(pi.ID)
	var addrs []string
	for _, a := range pi.Addrs {
		addrs = append(addrs, a.String())
	}
	out["Addrs"] = addrs
	return json.Marshal(out)
}

// UnmarshalJSON decodes the format produced by MarshalJSON.
// NOTE(review): the "ID" type assertion and ma.StringCast will panic
// on malformed input rather than returning an error — confirm callers
// only feed trusted data.
func (pi *PeerInfo) UnmarshalJSON(b []byte) error {
	var data map[string]interface{}
	err := json.Unmarshal(b, &data)
	if err != nil {
		return err
	}
	pid, err := IDB58Decode(data["ID"].(string))
	if err != nil {
		return err
	}
	pi.ID = pid
	addrs, ok := data["Addrs"].([]interface{})
	if ok {
		for _, a := range addrs {
			pi.Addrs = append(pi.Addrs, ma.StringCast(a.(string)))
		}
	}
	return nil
}

// IDSlice for sorting peers
type IDSlice []ID

func (es IDSlice) Len() int           { return len(es) }
func (es IDSlice) Swap(i, j int)      { es[i], es[j] = es[j], es[i] }
func (es IDSlice) Less(i, j int) bool { return string(es[i]) < string(es[j]) }
p2p/peer/peer_test.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer_test

import (
	"encoding/base64"
	"fmt"
	"strings"
	"testing"

	ic "github.com/ipfs/go-libp2p/p2p/crypto"
	. "github.com/ipfs/go-libp2p/p2p/peer"
	tu "github.com/ipfs/go-libp2p/testutil"

	u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"

	b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58"
)

var gen1 keyset // generated
var gen2 keyset // generated
var man keyset  // manual

func init() {
	if err := gen1.generate(); err != nil {
		panic(err)
	}
	if err := gen2.generate(); err != nil {
		panic(err)
	}
	skManBytes = strings.Replace(skManBytes, "\n", "", -1)
	if err := man.load(hpkpMan, skManBytes); err != nil {
		panic(err)
	}
}

// keyset bundles a key pair with its hashed-public-key forms.
type keyset struct {
	sk   ic.PrivKey
	pk   ic.PubKey
	hpk  string // raw multihash of the public key bytes
	hpkp string // b58 form of hpk
}

// generate fills the keyset with a fresh random 512-bit key pair.
func (ks *keyset) generate() error {
	var err error
	ks.sk, ks.pk, err = tu.RandTestKeyPair(512)
	if err != nil {
		return err
	}

	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}

	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	return nil
}

// load fills the keyset from a base64 private key, verifying that
// the derived b58 hash matches hpkp.
func (ks *keyset) load(hpkp, skBytesStr string) error {
	skBytes, err := base64.StdEncoding.DecodeString(skBytesStr)
	if err != nil {
		return err
	}

	ks.sk, err = ic.UnmarshalPrivateKey(skBytes)
	if err != nil {
		return err
	}

	ks.pk = ks.sk.GetPublic()
	bpk, err := ks.pk.Bytes()
	if err != nil {
		return err
	}

	ks.hpk = string(u.Hash(bpk))
	ks.hpkp = b58.Encode([]byte(ks.hpk))
	if ks.hpkp != hpkp {
		return fmt.Errorf("hpkp doesn't match key. %s", hpkp)
	}
	return nil
}

func TestIDMatchesPublicKey(t *testing.T) {
	test := func(ks keyset) {
		p1, err := IDB58Decode(ks.hpkp)
		if err != nil {
			t.Fatal(err)
		}

		if ks.hpk != string(p1) {
			t.Error("p1 and hpk differ")
		}

		if !p1.MatchesPublicKey(ks.pk) {
			t.Fatal("p1 does not match pk")
		}

		p2, err := IDFromPublicKey(ks.pk)
		if err != nil {
			t.Fatal(err)
		}

		if p1 != p2 {
			t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty())
		}

		if p2.Pretty() != ks.hpkp {
			t.Error("hpkp and p2.Pretty differ", ks.hpkp, p2.Pretty())
		}
	}

	test(gen1)
	test(gen2)
	test(man)
}

func TestIDMatchesPrivateKey(t *testing.T) {
	test := func(ks keyset) {
		p1, err := IDB58Decode(ks.hpkp)
		if err != nil {
			t.Fatal(err)
		}

		if ks.hpk != string(p1) {
			t.Error("p1 and hpk differ")
		}

		if !p1.MatchesPrivateKey(ks.sk) {
			t.Fatal("p1 does not match sk")
		}

		p2, err := IDFromPrivateKey(ks.sk)
		if err != nil {
			t.Fatal(err)
		}

		if p1 != p2 {
			t.Error("p1 and p2 differ", p1.Pretty(), p2.Pretty())
		}
	}

	test(gen1)
	test(gen2)
	test(man)
}

var hpkpMan = `QmRK3JgmVEGiewxWbhpXLJyjWuGuLeSTMTndA1coMHEy5o`
var skManBytes = `
CAAS4AQwggJcAgEAAoGBAL7w+Wc4VhZhCdM/+Hccg5Nrf4q9NXWwJylbSrXz/unFS24wyk6pEk0zi3W
7li+vSNVO+NtJQw9qGNAMtQKjVTP+3Vt/jfQRnQM3s6awojtjueEWuLYVt62z7mofOhCtj+VwIdZNBo
/EkLZ0ETfcvN5LVtLYa8JkXybnOPsLvK+PAgMBAAECgYBdk09HDM7zzL657uHfzfOVrdslrTCj6p5mo
DzvCxLkkjIzYGnlPuqfNyGjozkpSWgSUc+X+EGLLl3WqEOVdWJtbM61fewEHlRTM5JzScvwrJ39t7o6
CCAjKA0cBWBd6UWgbN/t53RoWvh9HrA2AW5YrT0ZiAgKe9y7EMUaENVJ8QJBAPhpdmb4ZL4Fkm4OKia
NEcjzn6mGTlZtef7K/0oRC9+2JkQnCuf6HBpaRhJoCJYg7DW8ZY+AV6xClKrgjBOfERMCQQDExhnzu2
dsQ9k8QChBlpHO0TRbZBiQfC70oU31kM1AeLseZRmrxv9Yxzdl8D693NNWS2JbKOXl0kMHHcuGQLMVA
kBZ7WvkmPV3aPL6jnwp2pXepntdVnaTiSxJ1dkXShZ/VSSDNZMYKY306EtHrIu3NZHtXhdyHKcggDXr
qkBrdgErAkAlpGPojUwemOggr4FD8sLX1ot2hDJyyV7OK2FXfajWEYJyMRL1Gm9Uk1+Un53RAkJneqp
JGAzKpyttXBTIDO51AkEA98KTiROMnnU8Y6Mgcvr68/SMIsvCYMt9/mtwSBGgl80VaTQ5Hpaktl6Xbh
VUt5Wv0tRxlXZiViCGCD1EtrrwTw==
`
p2p/peer/peerstore.go
deleted
100644 → 0
View file @
bd4f4b5b
package peer

import (
	"errors"
	"sync"
	"time"

	ic "github.com/ipfs/go-libp2p/p2p/crypto"

	//ds "github.com/jbenet/go-datastore"
	//dssync "github.com/jbenet/go-datastore/sync"
	ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)

const (
	// AddressTTL is the expiration time of addresses.
	AddressTTL = time.Hour
)

// Peerstore provides a threadsafe store of Peer related
// information.
type Peerstore interface {
	AddrBook
	KeyBook
	Metrics

	// Peers returns a list of all peer.IDs in this Peerstore
	Peers() []ID

	// PeerInfo returns a peer.PeerInfo struct for given peer.ID.
	// This is a small slice of the information Peerstore has on
	// that peer, useful to other services.
	PeerInfo(ID) PeerInfo

	// Get/Put is a simple registry for other peer-related key/value pairs.
	// if we find something we use often, it should become its own set of
	// methods. this is a last resort.
	Get(id ID, key string) (interface{}, error)
	Put(id ID, key string, val interface{}) error
}

// AddrBook is an interface that fits the new AddrManager. I'm patching
// it up in here to avoid changing a ton of the codebase.
type AddrBook interface {
	// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
	AddAddr(p ID, addr ma.Multiaddr, ttl time.Duration)

	// AddAddrs gives AddrManager addresses to use, with a given ttl
	// (time-to-live), after which the address is no longer valid.
	// If the manager has a longer TTL, the operation is a no-op for that address
	AddAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration)

	// SetAddr calls mgr.SetAddrs(p, addr, ttl)
	SetAddr(p ID, addr ma.Multiaddr, ttl time.Duration)

	// SetAddrs sets the ttl on addresses. This clears any TTL there previously.
	// This is used when we receive the best estimate of the validity of an address.
	SetAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration)

	// Addrs returns all known (and valid) addresses for a given peer.
	Addrs(p ID) []ma.Multiaddr

	// ClearAddrs removes all previously stored addresses for a given peer.
	ClearAddrs(p ID)
}

// KeyBook tracks the Public keys of Peers.
type KeyBook interface {
	PubKey(ID) ic.PubKey
	AddPubKey(ID, ic.PubKey) error

	PrivKey(ID) ic.PrivKey
	AddPrivKey(ID, ic.PrivKey) error
}

// keybook is the default KeyBook: two maps guarded by one RWMutex.
type keybook struct {
	pks map[ID]ic.PubKey
	sks map[ID]ic.PrivKey

	sync.RWMutex // same lock. wont happen a ton.
}

// newKeybook returns an initialized, empty keybook.
func newKeybook() *keybook {
	return &keybook{
		pks: map[ID]ic.PubKey{},
		sks: map[ID]ic.PrivKey{},
	}
}

// Peers returns the union of peers with stored public or private keys.
func (kb *keybook) Peers() []ID {
	kb.RLock()
	ps := make([]ID, 0, len(kb.pks)+len(kb.sks))
	for p := range kb.pks {
		ps = append(ps, p)
	}
	for p := range kb.sks {
		if _, found := kb.pks[p]; !found {
			ps = append(ps, p)
		}
	}
	kb.RUnlock()
	return ps
}

// PubKey returns the stored public key for p (nil if absent).
func (kb *keybook) PubKey(p ID) ic.PubKey {
	kb.RLock()
	pk := kb.pks[p]
	kb.RUnlock()
	return pk
}

// AddPubKey stores pk after verifying it derives the ID p.
func (kb *keybook) AddPubKey(p ID, pk ic.PubKey) error {
	// check it's correct first
	if !p.MatchesPublicKey(pk) {
		return errors.New("ID does not match PublicKey")
	}

	kb.Lock()
	kb.pks[p] = pk
	kb.Unlock()
	return nil
}

// PrivKey returns the stored private key for p (nil if absent).
func (kb *keybook) PrivKey(p ID) ic.PrivKey {
	kb.RLock()
	sk := kb.sks[p]
	kb.RUnlock()
	return sk
}

// AddPrivKey stores sk after verifying it derives the ID p.
func (kb *keybook) AddPrivKey(p ID, sk ic.PrivKey) error {
	if sk == nil {
		return errors.New("sk is nil (PrivKey)")
	}

	// check it's correct first
	if !p.MatchesPrivateKey(sk) {
		return errors.New("ID does not match PrivateKey")
	}

	kb.Lock()
	kb.sks[p] = sk
	kb.Unlock()
	return nil
}

// peerstore is the default Peerstore: keys + metrics + addresses,
// plus a generic mutex-guarded key/value registry.
type peerstore struct {
	keybook
	metrics
	AddrManager

	// store other data, like versions
	//ds ds.ThreadSafeDatastore
	// TODO: use a datastore for this
	ds     map[string]interface{}
	dslock sync.Mutex
}

// NewPeerstore creates a threadsafe collection of peers.
func NewPeerstore() Peerstore {
	return &peerstore{
		keybook:     *newKeybook(),
		metrics:     *(NewMetrics()).(*metrics),
		AddrManager: AddrManager{},
		//ds: dssync.MutexWrap(ds.NewMapDatastore()),
		ds: make(map[string]interface{}),
	}
}

// Put stores val under the composite key "<peer>/<key>".
func (ps *peerstore) Put(p ID, key string, val interface{}) error {
	//dsk := ds.NewKey(string(p) + "/" + key)
	//return ps.ds.Put(dsk, val)
	ps.dslock.Lock()
	defer ps.dslock.Unlock()
	ps.ds[string(p)+"/"+key] = val
	return nil
}

// Get retrieves the value stored under "<peer>/<key>".
func (ps *peerstore) Get(p ID, key string) (interface{}, error) {
	//dsk := ds.NewKey(string(p) + "/" + key)
	//return ps.ds.Get(dsk)
	ps.dslock.Lock()
	defer ps.dslock.Unlock()
	i, ok := ps.ds[string(p)+"/"+key]
	if !ok {
		return nil, errors.New("item not found")
	}
	return i, nil
}

// Peers returns the union of peers known to the keybook and the
// address manager.
func (ps *peerstore) Peers() []ID {
	set := map[ID]struct{}{}
	for _, p := range ps.keybook.Peers() {
		set[p] = struct{}{}
	}
	for _, p := range ps.AddrManager.Peers() {
		set[p] = struct{}{}
	}

	pps := make([]ID, 0, len(set))
	for p := range set {
		pps = append(pps, p)
	}
	return pps
}

// PeerInfo returns the ID plus currently-valid addresses for p.
func (ps *peerstore) PeerInfo(p ID) PeerInfo {
	return PeerInfo{
		ID:    p,
		Addrs: ps.AddrManager.Addrs(p),
	}
}

// PeerInfos maps a list of peer IDs to their PeerInfo structs.
func PeerInfos(ps Peerstore, peers []ID) []PeerInfo {
	pi := make([]PeerInfo, len(peers))
	for i, p := range peers {
		pi[i] = ps.PeerInfo(p)
	}
	return pi
}

// PeerInfoIDs extracts the IDs from a list of PeerInfo structs.
func PeerInfoIDs(pis []PeerInfo) []ID {
	ps := make([]ID, len(pis))
	for i, pi := range pis {
		ps[i] = pi.ID
	}
	return ps
}
p2p/peer/queue/distance.go
deleted
100644 → 0
View file @
bd4f4b5b
package
queue
import
(
"container/heap"
"math/big"
"sync"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
ks
"gx/ipfs/QmUusaX99BZoELh7dmPgirqRQ1FAmMnmnBn3oiqDFGBUSc/go-keyspace"
)
// peerMetric tracks a peer and its distance to something else.
type
peerMetric
struct
{
// the peer
peer
peer
.
ID
// big.Int for XOR metric
metric
*
big
.
Int
}
// peerMetricHeap is a min-heap of *peerMetric ordered by metric
// (see Less). It implements heap.Interface via the methods below.
type peerMetricHeap []*peerMetric
// Len reports the number of entries in the heap (sort.Interface).
func (ph peerMetricHeap) Len() int {
	return len(ph)
}
func
(
ph
peerMetricHeap
)
Less
(
i
,
j
int
)
bool
{
return
-
1
==
ph
[
i
]
.
metric
.
Cmp
(
ph
[
j
]
.
metric
)
}
// Swap exchanges the entries at i and j (sort.Interface).
func (ph peerMetricHeap) Swap(i, j int) {
	ph[i], ph[j] = ph[j], ph[i]
}
func
(
ph
*
peerMetricHeap
)
Push
(
x
interface
{})
{
item
:=
x
.
(
*
peerMetric
)
*
ph
=
append
(
*
ph
,
item
)
}
func
(
ph
*
peerMetricHeap
)
Pop
()
interface
{}
{
old
:=
*
ph
n
:=
len
(
old
)
item
:=
old
[
n
-
1
]
*
ph
=
old
[
0
:
n
-
1
]
return
item
}
// distancePQ implements PeerQueue, keeping peers ordered by XOR
// distance from a fixed key. (Its heap field is what implements
// heap.Interface.)
type distancePQ struct {
	// from is the Key this PQ measures against
	from ks.Key
	// heap is a heap of peerMetric items, ordered by distance to from
	heap peerMetricHeap
	// embedded RWMutex guards heap across Len/Enqueue/Dequeue
	sync.RWMutex
}
func
(
pq
*
distancePQ
)
Len
()
int
{
pq
.
Lock
()
defer
pq
.
Unlock
()
return
len
(
pq
.
heap
)
}
func
(
pq
*
distancePQ
)
Enqueue
(
p
peer
.
ID
)
{
pq
.
Lock
()
defer
pq
.
Unlock
()
distance
:=
ks
.
XORKeySpace
.
Key
([]
byte
(
p
))
.
Distance
(
pq
.
from
)
heap
.
Push
(
&
pq
.
heap
,
&
peerMetric
{
peer
:
p
,
metric
:
distance
,
})
}
func
(
pq
*
distancePQ
)
Dequeue
()
peer
.
ID
{
pq
.
Lock
()
defer
pq
.
Unlock
()
if
len
(
pq
.
heap
)
<
1
{
panic
(
"called Dequeue on an empty PeerQueue"
)
// will panic internally anyway, but we can help debug here
}
o
:=
heap
.
Pop
(
&
pq
.
heap
)
p
:=
o
.
(
*
peerMetric
)
return
p
.
peer
}
// NewXORDistancePQ returns a PeerQueue which maintains its peers sorted
// in terms of their distances to each other in an XORKeySpace (i.e. using
// XOR as a metric of distance).
func
NewXORDistancePQ
(
from
string
)
PeerQueue
{
return
&
distancePQ
{
from
:
ks
.
XORKeySpace
.
Key
([]
byte
(
from
)),
heap
:
peerMetricHeap
{},
}
}
p2p/peer/queue/interface.go
deleted
100644 → 0
View file @
bd4f4b5b
package
queue
import
peer
"github.com/ipfs/go-libp2p/p2p/peer"
// PeerQueue maintains a set of peers ordered according to a metric.
// Implementations of PeerQueue could order peers based on distances along
// a KeySpace, latency measurements, trustworthiness, reputation, etc.
type PeerQueue interface {
	// Len returns the number of items in PeerQueue.
	Len() int

	// Enqueue adds this node to the queue.
	Enqueue(peer.ID)

	// Dequeue retrieves the highest (smallest int) priority node.
	Dequeue() peer.ID
}
p2p/peer/queue/queue_test.go
deleted
100644 → 0
View file @
bd4f4b5b
package
queue
import
(
"fmt"
"sync"
"testing"
"time"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
u
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
func
TestQueue
(
t
*
testing
.
T
)
{
p1
:=
peer
.
ID
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"
)
// these aren't valid, because need to hex-decode.
p2
:=
peer
.
ID
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32"
)
// these aren't valid, because need to hex-decode.
p3
:=
peer
.
ID
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"
)
// these aren't valid, because need to hex-decode.
p4
:=
peer
.
ID
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34"
)
// these aren't valid, because need to hex-decode.
p5
:=
peer
.
ID
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"
)
// these aren't valid, because need to hex-decode.
// but they work.
// these are the peer.IDs' XORKeySpace Key values:
// [228 47 151 130 156 102 222 232 218 31 132 94 170 208 80 253 120 103 55 35 91 237 48 157 81 245 57 247 66 150 9 40]
// [26 249 85 75 54 49 25 30 21 86 117 62 85 145 48 175 155 194 210 216 58 14 241 143 28 209 129 144 122 28 163 6]
// [78 135 26 216 178 181 224 181 234 117 2 248 152 115 255 103 244 34 4 152 193 88 9 225 8 127 216 158 226 8 236 246]
// [125 135 124 6 226 160 101 94 192 57 39 12 18 79 121 140 190 154 147 55 44 83 101 151 63 255 94 179 51 203 241 51]
pq
:=
NewXORDistancePQ
(
"11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"
)
pq
.
Enqueue
(
p3
)
pq
.
Enqueue
(
p1
)
pq
.
Enqueue
(
p2
)
pq
.
Enqueue
(
p4
)
pq
.
Enqueue
(
p5
)
pq
.
Enqueue
(
p1
)
// should come out as: p1, p4, p3, p2
if
d
:=
pq
.
Dequeue
();
d
!=
p1
&&
d
!=
p5
{
t
.
Error
(
"ordering failed"
)
}
if
d
:=
pq
.
Dequeue
();
d
!=
p1
&&
d
!=
p5
{
t
.
Error
(
"ordering failed"
)
}
if
d
:=
pq
.
Dequeue
();
d
!=
p1
&&
d
!=
p5
{
t
.
Error
(
"ordering failed"
)
}
if
pq
.
Dequeue
()
!=
p4
{
t
.
Error
(
"ordering failed"
)
}
if
pq
.
Dequeue
()
!=
p3
{
t
.
Error
(
"ordering failed"
)
}
if
pq
.
Dequeue
()
!=
p2
{
t
.
Error
(
"ordering failed"
)
}
}
func
newPeerTime
(
t
time
.
Time
)
peer
.
ID
{
s
:=
fmt
.
Sprintf
(
"hmmm time: %v"
,
t
)
h
:=
u
.
Hash
([]
byte
(
s
))
return
peer
.
ID
(
h
)
}
// TestSyncQueue stress-tests ChanQueue with 2n producers feeding
// EnqChan and n consumers draining DeqChan, then checks that every
// enqueued peer was dequeued (sum in == sum out).
func TestSyncQueue(t *testing.T) {
	tickT := time.Microsecond * 50
	max := 5000
	consumerN := 10
	// one slot per producer (2 per consumer) / per consumer; each
	// goroutine increments only its own slot, so no locking is needed.
	countsIn := make([]int, consumerN*2)
	countsOut := make([]int, consumerN)

	if testing.Short() {
		max = 1000
	}

	ctx := context.Background()
	pq := NewXORDistancePQ("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
	cq := NewChanQueue(ctx, pq)
	wg := sync.WaitGroup{}

	// produce sends max time-derived peer IDs, one per tick.
	// NOTE(review): time.Tick's ticker is never stopped; acceptable in
	// a test but it outlives the producer goroutine.
	produce := func(p int) {
		defer wg.Done()

		tick := time.Tick(tickT)
		for i := 0; i < max; i++ {
			select {
			case tim := <-tick:
				countsIn[p]++
				cq.EnqChan <- newPeerTime(tim)
			case <-ctx.Done():
				return
			}
		}
	}

	// consume drains DeqChan until it has seen max*2 items — the
	// combined output of this consumer's two producers.
	consume := func(c int) {
		defer wg.Done()

		for {
			select {
			case <-cq.DeqChan:
				countsOut[c]++
				if countsOut[c] >= max*2 {
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}

	// make n * 2 producers and n consumers
	for i := 0; i < consumerN; i++ {
		wg.Add(3)
		go produce(i)
		go produce(consumerN + i)
		go consume(i)
	}

	wg.Wait()

	// sum totals a counter slice.
	sum := func(ns []int) int {
		total := 0
		for _, n := range ns {
			total += n
		}
		return total
	}

	if sum(countsIn) != sum(countsOut) {
		t.Errorf("didnt get all of them out: %d/%d", sum(countsOut), sum(countsIn))
	}
}
p2p/peer/queue/sync.go
deleted
100644 → 0
View file @
bd4f4b5b
package
queue
import
(
peer
"github.com/ipfs/go-libp2p/p2p/peer"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
)
// log is the package-level logger for the peer queue.
var log = logging.Logger("peerqueue")
// ChanQueue makes any PeerQueue synchronizable through channels.
type ChanQueue struct {
	// Queue is the wrapped PeerQueue; accessed by the process goroutine.
	Queue PeerQueue
	// EnqChan receives peers to enqueue; closing it shuts the pump down.
	EnqChan chan<- peer.ID
	// DeqChan yields dequeued peers; closed when the pump exits.
	DeqChan <-chan peer.ID
}
// NewChanQueue creates a ChanQueue by wrapping pq. It starts a
// background goroutine (via process) that pumps peers between
// EnqChan, the queue, and DeqChan until ctx is done or EnqChan is
// closed.
func NewChanQueue(ctx context.Context, pq PeerQueue) *ChanQueue {
	cq := &ChanQueue{Queue: pq}
	cq.process(ctx)
	return cq
}
// process wires EnqChan/DeqChan to the wrapped Queue and starts the
// pump goroutine. The goroutine holds at most one dequeued peer
// ("next") ready to offer on deqChan; anything arriving on enqChan is
// pushed into the Queue. It exits when ctx is done or enqChan is
// closed, closing deqChan on the way out.
func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.ID)
	deqChan := make(chan peer.ID)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		log.Debug("processing")
		defer log.Debug("closed")
		defer close(deqChan)

		var next peer.ID
		var item peer.ID
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// Queue empty: block until something is enqueued
				// (or we are cancelled) before offering anything.
				// log.Debug("wait for enqueue")
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
					// log.Debug("got", next)

				case <-ctx.Done():
					return
				}

			} else {
				next = cq.Queue.Dequeue()
				// log.Debug("peek", next)
			}

			select {
			case item, more = <-enqChan:
				if !more {
					// NOTE(review): on close, any peers still in the
					// Queue (including "next") appear to be dropped
					// here — confirm this is intended.
					if cq.Queue.Len() > 0 {
						return // we're done done.
					}
					enqChan = nil // closed, so no use.
				}
				// NOTE(review): when enqChan was just closed, item is
				// the zero-value peer.ID ("") and is still enqueued
				// below — confirm downstream tolerates empty IDs.
				// log.Debug("got", item)
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next) // order may have changed.
				next = ""

			case deqChan <- next:
				// Consumer took "next"; clear it so it isn't re-offered.
				// log.Debug("dequeued", next)
				next = ""

			case <-ctx.Done():
				return
			}
		}
	}()
}
p2p/protocol/identify/id.go
View file @
37bff87b
...
@@ -7,15 +7,15 @@ import (
...
@@ -7,15 +7,15 @@ import (
host
"github.com/ipfs/go-libp2p/p2p/host"
host
"github.com/ipfs/go-libp2p/p2p/host"
mstream
"github.com/ipfs/go-libp2p/p2p/metrics/stream"
mstream
"github.com/ipfs/go-libp2p/p2p/metrics/stream"
inet
"github.com/ipfs/go-libp2p/p2p/net"
inet
"github.com/ipfs/go-libp2p/p2p/net"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
pb
"github.com/ipfs/go-libp2p/p2p/protocol/identify/pb"
pb
"github.com/ipfs/go-libp2p/p2p/protocol/identify/pb"
msmux
"gx/ipfs/QmUeEcYJrzAEKdQXjzTxCgNZgc9sRuwharsvzzm5Gd2oGB/go-multistream"
msmux
"gx/ipfs/QmUeEcYJrzAEKdQXjzTxCgNZgc9sRuwharsvzzm5Gd2oGB/go-multistream"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
ggio
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io"
ggio
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
semver
"gx/ipfs/QmcrrEpx3VMUbrbgVroH3YiYyUS5c4YAykzyPJWKspUYLa/go-semver/semver"
semver
"gx/ipfs/QmcrrEpx3VMUbrbgVroH3YiYyUS5c4YAykzyPJWKspUYLa/go-semver/semver"
lgbl
"g
ithub.com/ipfs
/go-libp2p
/
loggables"
lgbl
"g
x/ipfs/QmSyBhZt2upyQ3NJmTpab1pX6hesA59vcYTGmgoDorZZbw
/go-libp2p
-
loggables"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
)
)
...
...
p2p/protocol/identify/id_test.go
View file @
37bff87b
...
@@ -5,9 +5,9 @@ import (
...
@@ -5,9 +5,9 @@ import (
"time"
"time"
host
"github.com/ipfs/go-libp2p/p2p/host"
host
"github.com/ipfs/go-libp2p/p2p/host"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
identify
"github.com/ipfs/go-libp2p/p2p/protocol/identify"
identify
"github.com/ipfs/go-libp2p/p2p/protocol/identify"
testutil
"github.com/ipfs/go-libp2p/p2p/test/util"
testutil
"github.com/ipfs/go-libp2p/p2p/test/util"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
...
...
p2p/protocol/identify/obsaddr.go
View file @
37bff87b
...
@@ -4,8 +4,7 @@ import (
...
@@ -4,8 +4,7 @@ import (
"sync"
"sync"
"time"
"time"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
ma
"gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr"
)
)
...
...
p2p/protocol/ping/ping.go
View file @
37bff87b
...
@@ -8,7 +8,7 @@ import (
...
@@ -8,7 +8,7 @@ import (
host
"github.com/ipfs/go-libp2p/p2p/host"
host
"github.com/ipfs/go-libp2p/p2p/host"
inet
"github.com/ipfs/go-libp2p/p2p/net"
inet
"github.com/ipfs/go-libp2p/p2p/net"
peer
"g
ithub.com/ipfs
/go-libp2p
/p2p/
peer"
peer
"g
x/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu
/go-libp2p
-
peer"
u
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
u
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
...
...
p2p/protocol/ping/ping_test.go
View file @
37bff87b
...
@@ -4,8 +4,8 @@ import (
...
@@ -4,8 +4,8 @@ import (
"testing"
"testing"
"time"
"time"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
netutil
"github.com/ipfs/go-libp2p/p2p/test/util"
netutil
"github.com/ipfs/go-libp2p/p2p/test/util"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)
)
...
...
p2p/protocol/relay/relay.go
View file @
37bff87b
...
@@ -7,8 +7,8 @@ import (
...
@@ -7,8 +7,8 @@ import (
host
"github.com/ipfs/go-libp2p/p2p/host"
host
"github.com/ipfs/go-libp2p/p2p/host"
inet
"github.com/ipfs/go-libp2p/p2p/net"
inet
"github.com/ipfs/go-libp2p/p2p/net"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
protocol
"github.com/ipfs/go-libp2p/p2p/protocol"
protocol
"github.com/ipfs/go-libp2p/p2p/protocol"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
mh
"gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
mh
"gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
...
...
p2p/test/backpressure/backpressure_test.go
View file @
37bff87b
...
@@ -8,9 +8,9 @@ import (
...
@@ -8,9 +8,9 @@ import (
host
"github.com/ipfs/go-libp2p/p2p/host"
host
"github.com/ipfs/go-libp2p/p2p/host"
inet
"github.com/ipfs/go-libp2p/p2p/net"
inet
"github.com/ipfs/go-libp2p/p2p/net"
peer
"github.com/ipfs/go-libp2p/p2p/peer"
protocol
"github.com/ipfs/go-libp2p/p2p/protocol"
protocol
"github.com/ipfs/go-libp2p/p2p/protocol"
testutil
"github.com/ipfs/go-libp2p/p2p/test/util"
testutil
"github.com/ipfs/go-libp2p/p2p/test/util"
peer
"gx/ipfs/QmY1xNhBfF9xA1pmD8yejyQAyd77K68qNN6JPM1CN2eiRu/go-libp2p-peer"
u
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
u
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
context
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
logging
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log"
...
...
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment