Commit 216db146 authored by Jeromy

more vendoring

parent e1427950
{
"name": "go-utp",
"author": "whyrusleeping",
"version": "1.0.0",
"gxDependencies": [
{
"name": "jitter",
"hash": "QmbDXAJ4Fzpmqw9kTVPtPGZEsgyn33ipbUQEe8vMUZLnTS",
"version": "1.0.0"
},
{
"name": "envpprof",
"hash": "QmWsa476RGjb9scWzcRVts3QZsYjU5Kt6Y9qe8Q3vc5FHR",
"version": "1.0.0"
}
],
"language": "go",
"gx": {
"dvcsimport": "github.com/anacrolix/utp"
}
}
# This shell script uses nc-like executables to send and receive the file at
# $1, and prints the checksums. Three such executables are used:
# github.com/h2so5/utp/ucat, invoked as h2so5-ucat; libutp-ucat, the ucat or
# ucat-static binary built from the C++ libutp; and lastly ./cmd/ucat from
# this repository. A good file in my experiments is no more than a few
# hundred MB, or you'll be waiting a while.
set -eu
# set -x
# Passed to invocations of godo for package ./cmd/ucat.
#GODOFLAGS=-race
#export GO_UTP_PACKET_DROP=0.1
export GOPPROF=
# Invokes the implementation to test against. If there's an arg, then it's
# expected to listen.
function other_ucat() {
if [[ $# != 0 ]]; then
libutp-ucat -l -p "$port"
# h2so5-ucat -l :"$port"
else
libutp-ucat localhost "$port"
# h2so5-ucat localhost:"$port"
fi
}
function md5cmd() {
(which md5sum > /dev/null && md5sum "$@") || (which md5 > /dev/null && md5 "$@") || md5sum "$@"
}
# Check what the correct result is.
md5cmd "$1"
rate() {
pv -a -W -b
}
port=4000
echo 'utp->other_ucat'
# Send from this uTP implementation to another client.
other_ucat -l | rate | md5cmd &
# sleep 1
godo ${GODOFLAGS-} ./cmd/ucat localhost "$port" < "$1"
wait
echo 'other_ucat->utp'
# Send from the other implementation, to this one.
GO_UTP_LOGGING=0 godo ${GODOFLAGS-} ./cmd/ucat -l -p "$port" | rate | md5cmd &
# Never receive from h2so5's ucat without a small sleep first. Don't know why.
sleep 1
other_ucat < "$1"
wait
echo 'libutp->libutp'
libutp-ucat -l -p "$port" | rate | md5cmd &
libutp-ucat localhost "$port" < "$1"
wait
echo 'utp->utp'
godo ./cmd/ucat -l -p "$port" | rate | md5cmd &
sleep 1
godo ./cmd/ucat localhost "$port" < "$1"
wait
# Now check the hashes match (yes you).
// Package utp implements uTP, the micro transport protocol as used with
// Bittorrent. It opts for simplicity and reliability over strict adherence to
// the (poor) spec. It allows using the underlying OS-level transport despite
// dispatching uTP on top, to allow, for example, shared socket use with DHT.
// Additionally, multiple uTP connections can share the same OS socket, to
// truly realize uTP's claim to be light on system and network switching
// resources.
//
// Socket is a wrapper of net.UDPConn, and performs dispatching of uTP packets
// to attached uTP Conns. Dial and Accept is done via Socket. Conn implements
// net.Conn over uTP, via aforementioned Socket.
package utp
import (
"encoding/binary"
"errors"
"expvar"
"fmt"
"io"
"log"
"math"
"math/rand"
"net"
"os"
"strconv"
"sync"
"time"
"QmbDXAJ4Fzpmqw9kTVPtPGZEsgyn33ipbUQEe8vMUZLnTS/jitter"
)
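// The following is an illustrative sketch, not part of the original source:
// it shows the Socket/Conn model described in the package comment, with a
// single Socket both accepting and dialling so that one OS-level UDP socket
// carries multiple uTP connections. Addresses and error handling are
// simplified for demonstration only.
func exampleSharedSocket() error {
	s, err := NewSocket("udp", "localhost:0")
	if err != nil {
		return err
	}
	defer s.Close()
	go func() {
		// Accept returns a net.Conn backed by uTP.
		c, err := s.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		buf := make([]byte, 16)
		n, _ := c.Read(buf)
		log.Printf("received %q", buf[:n])
	}()
	// Dial through the same Socket; both Conns share the one net.PacketConn.
	c, err := s.Dial(s.Addr().String())
	if err != nil {
		return err
	}
	defer c.Close()
	_, err = c.Write([]byte("hello"))
	return err
}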
const (
// Maximum received SYNs that haven't been accepted. If more SYNs are
// received, a pseudo-randomly selected SYN is replied to with a reset to
// make room.
backlog = 50
// IPv6 min MTU is 1280, -40 for IPv6 header, and ~8 for fragment header?
minMTU = 1232
recvWindow = 1 << 18 // 256KiB
// uTP header of 20, +2 for the next extension, and 8 bytes of selective
// ACK.
maxHeaderSize = 30
maxPayloadSize = minMTU - maxHeaderSize
maxRecvSize = 0x2000
// Maximum out-of-order packets to buffer.
maxUnackedInbound = 256
maxUnackedSends = 256
)
var (
ackSkippedResends = expvar.NewInt("utpAckSkippedResends")
// Inbound packets processed by a Conn.
deliveriesProcessed = expvar.NewInt("utpDeliveriesProcessed")
sentStatePackets = expvar.NewInt("utpSentStatePackets")
unusedReads = expvar.NewInt("utpUnusedReads")
sendBufferPool = sync.Pool{
New: func() interface{} { return make([]byte, minMTU) },
}
// This is the latency we assume on new connections. It should be higher
// than the latency we expect on most connections to prevent excessive
// resending to peers that take a long time to respond, before we've got a
// better idea of their actual latency.
initialLatency = 400 * time.Millisecond
// If a write isn't acked within this period, destroy the connection.
writeTimeout = 15 * time.Second
packetReadTimeout = 2 * time.Minute
)
type deadlineCallback struct {
deadline time.Time
timer *time.Timer
callback func()
}
func (me *deadlineCallback) deadlineExceeded() bool {
return !me.deadline.IsZero() && !time.Now().Before(me.deadline)
}
func (me *deadlineCallback) updateTimer() {
if me.timer != nil {
me.timer.Stop()
}
if me.deadline.IsZero() {
return
}
if me.callback == nil {
panic("deadline callback is nil")
}
me.timer = time.AfterFunc(me.deadline.Sub(time.Now()), me.callback)
}
func (me *deadlineCallback) setDeadline(t time.Time) {
me.deadline = t
me.updateTimer()
}
func (me *deadlineCallback) setCallback(f func()) {
me.callback = f
me.updateTimer()
}
type connDeadlines struct {
// mu sync.Mutex
read, write deadlineCallback
}
func (c *connDeadlines) SetDeadline(t time.Time) error {
c.read.setDeadline(t)
c.write.setDeadline(t)
return nil
}
func (c *connDeadlines) SetReadDeadline(t time.Time) error {
c.read.setDeadline(t)
return nil
}
func (c *connDeadlines) SetWriteDeadline(t time.Time) error {
c.write.setDeadline(t)
return nil
}
// Strongly-typed guarantee of a resolved network address.
type resolvedAddrStr string
// Uniquely identifies any uTP connection on top of the underlying packet
// stream.
type connKey struct {
remoteAddr resolvedAddrStr
connID uint16
}
// A Socket wraps a net.PacketConn, diverting uTP packets to its child uTP
// Conns.
type Socket struct {
mu sync.RWMutex
event sync.Cond
pc net.PacketConn
conns map[connKey]*Conn
backlog map[syn]struct{}
reads chan read
closing bool
unusedReads chan read
connDeadlines
// If a read error occurs on the underlying net.PacketConn, it is put
// here. This is because reading is done in its own goroutine to dispatch
// to uTP Conns.
ReadErr error
}
type read struct {
data []byte
from net.Addr
}
type syn struct {
seq_nr, conn_id uint16
addr string
}
const (
extensionTypeSelectiveAck = 1
)
type extensionField struct {
Type byte
Bytes []byte
}
type header struct {
Type st
Version int
ConnID uint16
Timestamp uint32
TimestampDiff uint32
WndSize uint32
SeqNr uint16
AckNr uint16
Extensions []extensionField
}
var (
mu sync.RWMutex
logLevel = 0
artificialPacketDropChance = 0.0
)
func init() {
logLevel, _ = strconv.Atoi(os.Getenv("GO_UTP_LOGGING"))
fmt.Sscanf(os.Getenv("GO_UTP_PACKET_DROP"), "%f", &artificialPacketDropChance)
}
var (
errClosed = errors.New("closed")
errNotImplemented = errors.New("not implemented")
errTimeout net.Error = timeoutError{"i/o timeout"}
errAckTimeout = timeoutError{"timed out waiting for ack"}
)
type timeoutError struct {
msg string
}
func (me timeoutError) Timeout() bool { return true }
func (me timeoutError) Error() string { return me.msg }
func (me timeoutError) Temporary() bool { return false }
func unmarshalExtensions(_type byte, b []byte) (n int, ef []extensionField, err error) {
for _type != 0 {
if _type != extensionTypeSelectiveAck {
// An extension type that is not known to us. Generally we're
// unmarshalling a packet that isn't actually uTP but we don't
// yet know for sure until we try to deliver it.
// logonce.Stderr.Printf("utp extension %d", _type)
}
if len(b) < 2 || len(b) < int(b[1])+2 {
err = fmt.Errorf("buffer ends prematurely: %x", b)
return
}
ef = append(ef, extensionField{
Type: _type,
Bytes: append([]byte{}, b[2:int(b[1])+2]...),
})
_type = b[0]
n += 2 + int(b[1])
b = b[2+int(b[1]):]
}
return
}
var errInvalidHeader = errors.New("invalid header")
func (h *header) Unmarshal(b []byte) (n int, err error) {
h.Type = st(b[0] >> 4)
h.Version = int(b[0] & 0xf)
if h.Type > stMax || h.Version != 1 {
err = errInvalidHeader
return
}
n, h.Extensions, err = unmarshalExtensions(b[1], b[20:])
if err != nil {
return
}
h.ConnID = binary.BigEndian.Uint16(b[2:4])
h.Timestamp = binary.BigEndian.Uint32(b[4:8])
h.TimestampDiff = binary.BigEndian.Uint32(b[8:12])
h.WndSize = binary.BigEndian.Uint32(b[12:16])
h.SeqNr = binary.BigEndian.Uint16(b[16:18])
h.AckNr = binary.BigEndian.Uint16(b[18:20])
n += 20
return
}
func (h *header) Marshal() (ret []byte) {
hLen := 20 + func() (ret int) {
for _, ext := range h.Extensions {
ret += 2 + len(ext.Bytes)
}
return
}()
ret = sendBufferPool.Get().([]byte)[:hLen:minMTU]
// ret = make([]byte, hLen, minMTU)
p := ret // Used for manipulating ret.
p[0] = byte(h.Type<<4 | 1)
binary.BigEndian.PutUint16(p[2:4], h.ConnID)
binary.BigEndian.PutUint32(p[4:8], h.Timestamp)
binary.BigEndian.PutUint32(p[8:12], h.TimestampDiff)
binary.BigEndian.PutUint32(p[12:16], h.WndSize)
binary.BigEndian.PutUint16(p[16:18], h.SeqNr)
binary.BigEndian.PutUint16(p[18:20], h.AckNr)
// Pointer to the last type field so the next extension can set it.
_type := &p[1]
// We're done with the basic header.
p = p[20:]
for _, ext := range h.Extensions {
*_type = ext.Type
// The next extension's type will go here.
_type = &p[0]
p[1] = uint8(len(ext.Bytes))
if int(p[1]) != copy(p[2:], ext.Bytes) {
panic("unexpected extension length")
}
p = p[2+len(ext.Bytes):]
}
if len(p) != 0 {
panic("header length changed")
}
return
}
var (
_ net.Listener = &Socket{}
_ net.PacketConn = &Socket{}
)
type st int
func (me st) String() string {
switch me {
case stData:
return "stData"
case stFin:
return "stFin"
case stState:
return "stState"
case stReset:
return "stReset"
case stSyn:
return "stSyn"
default:
panic(fmt.Sprintf("%d", me))
}
}
const (
stData st = 0
stFin = 1
stState = 2
stReset = 3
stSyn = 4
// Used for validating packet headers.
stMax = stSyn
)
// Conn is a uTP stream and implements net.Conn. It is owned by a Socket, which
// handles dispatching packets to and from Conns.
type Conn struct {
mu sync.Mutex
event sync.Cond
recv_id, send_id uint16
seq_nr, ack_nr uint16
lastAck uint16
lastTimeDiff uint32
peerWndSize uint32
cur_window uint32
// Data waiting to be Read.
readBuf []byte
socket *Socket
remoteAddr net.Addr
// The uTP timestamp.
startTimestamp uint32
// When the conn was allocated.
created time.Time
sentSyn bool
synAcked bool
gotFin bool
wroteFin bool
finAcked bool
err error
closing bool
closed bool
unackedSends []*send
// Inbound payloads, the first is ack_nr+1.
inbound []recv
inboundWnd uint32
packetsIn chan packet
connDeadlines
latencies []time.Duration
pendingSendState bool
}
type send struct {
acked bool // Closed with Conn lock.
payloadSize uint32
started time.Time
// This send was skipped in a selective ack.
resend func()
timedOut func()
conn *Conn
acksSkipped int
resendTimer *time.Timer
numResends int
}
func (s *send) Ack() (latency time.Duration, first bool) {
s.resendTimer.Stop()
if s.acked {
return
}
s.acked = true
s.conn.event.Broadcast()
first = true
latency = time.Since(s.started)
return
}
type recv struct {
seen bool
data []byte
Type st
}
var (
_ net.Conn = &Conn{}
)
func (c *Conn) age() time.Duration {
return time.Since(c.created)
}
func (c *Conn) timestamp() uint32 {
return nowTimestamp() - c.startTimestamp
}
// Create a Socket, using the provided net.PacketConn. If you want to retain
// use of the net.PacketConn after the Socket closes it, override your
// net.PacketConn's Close method.
func NewSocketFromPacketConn(pc net.PacketConn) (s *Socket, err error) {
s = &Socket{
backlog: make(map[syn]struct{}, backlog),
reads: make(chan read, 100),
pc: pc,
unusedReads: make(chan read, 100),
}
s.event.L = &s.mu
go s.reader()
go s.dispatcher()
return
}
// addr is used to create a listening UDP conn which becomes the underlying
// net.PacketConn for the Socket.
func NewSocket(network, addr string) (s *Socket, err error) {
pc, err := net.ListenPacket(network, addr)
if err != nil {
return
}
return NewSocketFromPacketConn(pc)
}
func packetDebugString(h *header, payload []byte) string {
return fmt.Sprintf("%s->%d: %q", h.Type, h.ConnID, payload)
}
func (s *Socket) reader() {
defer close(s.reads)
var b [maxRecvSize]byte
for {
if s.pc == nil {
break
}
n, addr, err := s.pc.ReadFrom(b[:])
if err != nil {
s.mu.Lock()
if !s.closing {
s.ReadErr = err
}
s.mu.Unlock()
return
}
var nilB []byte
s.reads <- read{append(nilB, b[:n:n]...), addr}
}
}
func (s *Socket) unusedRead(read read) {
unusedReads.Add(1)
select {
case s.unusedReads <- read:
default:
// Drop the packet.
}
}
func stringAddr(s string) net.Addr {
addr, err := net.ResolveUDPAddr("udp", s)
if err != nil {
panic(err)
}
return addr
}
func (s *Socket) pushBacklog(syn syn) {
if _, ok := s.backlog[syn]; ok {
return
}
for k := range s.backlog {
if len(s.backlog) < backlog {
break
}
delete(s.backlog, k)
// A syn is sent on the remote's recv_id, so this is where we can send
// the reset.
s.reset(stringAddr(k.addr), k.seq_nr, k.conn_id)
}
s.backlog[syn] = struct{}{}
s.event.Broadcast()
}
func (s *Socket) dispatcher() {
for {
select {
case read, ok := <-s.reads:
if !ok {
return
}
if len(read.data) < 20 {
s.unusedRead(read)
continue
}
s.dispatch(read)
}
}
}
func (s *Socket) dispatch(read read) {
b := read.data
addr := read.from
var h header
hEnd, err := h.Unmarshal(b)
if logLevel >= 1 {
log.Printf("recvd utp msg: %s", packetDebugString(&h, b[hEnd:]))
}
if err != nil || h.Type > stMax || h.Version != 1 {
s.unusedRead(read)
return
}
s.mu.Lock()
defer s.mu.Unlock()
c, ok := s.conns[connKey{resolvedAddrStr(addr.String()), func() (recvID uint16) {
recvID = h.ConnID
// If a SYN is resent, its connection ID field will be one lower
// than we expect.
if h.Type == stSyn {
recvID++
}
return
}()}]
if ok {
if h.Type == stSyn {
if h.ConnID == c.send_id-2 {
// This is a SYN for a connection that cannot exist locally. The
// connection the remote wants to establish here, with the proposed
// recv_id, corresponds to a connection that was dialled *out* from
// this socket, which is why the send_id is 1 higher, rather than 1
// lower, than the recv_id.
log.Print("resetting conflicting syn")
s.reset(addr, h.SeqNr, h.ConnID)
return
} else if h.ConnID != c.send_id {
panic("bad assumption")
}
}
c.deliver(h, b[hEnd:])
return
}
if h.Type == stSyn {
if logLevel >= 1 {
log.Printf("adding SYN to backlog")
}
syn := syn{
seq_nr: h.SeqNr,
conn_id: h.ConnID,
addr: addr.String(),
}
s.pushBacklog(syn)
return
} else if h.Type != stReset {
// This is an unexpected packet. We'll send a reset, but also pass
// it on.
// log.Print("resetting unexpected packet")
// I don't think you can reset on the received packet's ConnID if it isn't a SYN, as the send_id will differ in this case.
s.reset(addr, h.SeqNr, h.ConnID)
s.reset(addr, h.SeqNr, h.ConnID-1)
s.reset(addr, h.SeqNr, h.ConnID+1)
}
s.unusedRead(read)
}
// Send a reset in response to a packet with the given header.
func (s *Socket) reset(addr net.Addr, ackNr, connId uint16) {
go s.writeTo((&header{
Type: stReset,
Version: 1,
ConnID: connId,
AckNr: ackNr,
}).Marshal(), addr)
}
// Attempt to connect to a remote uTP listener, creating a Socket just for
// this connection.
func Dial(addr string) (net.Conn, error) {
return DialTimeout(addr, 0)
}
// Same as Dial with a timeout parameter.
func DialTimeout(addr string, timeout time.Duration) (nc net.Conn, err error) {
s, err := NewSocket("udp", ":0")
if err != nil {
return
}
return s.DialTimeout(addr, timeout)
}
// Return a recv_id that should be free. Handling the case where it isn't is
// deferred to a more appropriate function.
func (s *Socket) newConnID(remoteAddr resolvedAddrStr) (id uint16) {
// Rather than use rand.Perm, which requires generating all the IDs up
// front and allocating a slice, we do it on the stack, generating the IDs
// only as required. To do this, we use the fact that the array is
// default-initialized. IDs that are 0, are actually their index in the
// array. IDs that are non-zero, are +1 from their intended ID.
var idsBack [0x10000]int
ids := idsBack[:]
for len(ids) != 0 {
// Pick the next ID from the untried ids.
i := rand.Intn(len(ids))
id = uint16(ids[i])
// If it's zero, then treat it as though the index i was the ID.
// Otherwise the value we get is the ID+1.
if id == 0 {
id = uint16(i)
} else {
id--
}
// Check there's no connection using this ID for its recv_id...
_, ok1 := s.conns[connKey{remoteAddr, id}]
// and if we're connecting to our own Socket, that there isn't a Conn
// already receiving on what will correspond to our send_id. Note that
// we just assume that we could be connecting to our own Socket. This
// will halve the available connection IDs to each distinct remote
// address. Presumably that's ~0x8000, down from ~0x10000.
_, ok2 := s.conns[connKey{remoteAddr, id + 1}]
_, ok4 := s.conns[connKey{remoteAddr, id - 1}]
if !ok1 && !ok2 && !ok4 {
return
}
// The set of possible IDs is shrinking. The highest one will be lost, so
// it's moved to the location of the one we just tried.
ids[i] = len(ids) // Conveniently already +1.
// And shrink.
ids = ids[:len(ids)-1]
}
return
}
func (c *Conn) sendPendingState() {
if !c.pendingSendState {
return
}
if c.closed {
c.sendReset()
} else {
c.sendState()
}
}
func (s *Socket) newConn(addr net.Addr) (c *Conn) {
c = &Conn{
socket: s,
remoteAddr: addr,
created: time.Now(),
packetsIn: make(chan packet, 100),
}
c.event.L = &c.mu
c.mu.Lock()
c.connDeadlines.read.setCallback(func() {
c.mu.Lock()
c.event.Broadcast()
c.mu.Unlock()
})
c.connDeadlines.write.setCallback(func() {
c.mu.Lock()
c.event.Broadcast()
c.mu.Unlock()
})
c.mu.Unlock()
go c.deliveryProcessor()
return
}
func (s *Socket) Dial(addr string) (net.Conn, error) {
return s.DialTimeout(addr, 0)
}
func (s *Socket) DialTimeout(addr string, timeout time.Duration) (nc net.Conn, err error) {
netAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return
}
s.mu.Lock()
c := s.newConn(netAddr)
c.recv_id = s.newConnID(resolvedAddrStr(netAddr.String()))
c.send_id = c.recv_id + 1
if logLevel >= 1 {
log.Printf("dial registering addr: %s", netAddr.String())
}
if !s.registerConn(c.recv_id, resolvedAddrStr(netAddr.String()), c) {
err = errors.New("couldn't register new connection")
log.Println(c.recv_id, netAddr.String())
for k, c := range s.conns {
log.Println(k, c, c.age())
}
log.Printf("that's %d connections", len(s.conns))
}
s.mu.Unlock()
if err != nil {
return
}
connErr := make(chan error, 1)
go func() {
connErr <- c.connect()
}()
var timeoutCh <-chan time.Time
if timeout != 0 {
timeoutCh = time.After(timeout)
}
select {
case err = <-connErr:
case <-timeoutCh:
err = errTimeout
}
if err == nil {
nc = c
} else {
c.Close()
}
return
}
func (c *Conn) wndSize() uint32 {
if len(c.inbound) > maxUnackedInbound/2 {
return 0
}
buffered := uint32(len(c.readBuf)) + c.inboundWnd
if buffered > recvWindow {
return 0
}
return recvWindow - buffered
}
func nowTimestamp() uint32 {
return uint32(time.Now().UnixNano() / int64(time.Microsecond))
}
// Send the given payload with an up-to-date header.
func (c *Conn) send(_type st, connID uint16, payload []byte, seqNr uint16) (err error) {
// Always selectively ack the first 64 packets. Don't bother with the rest
// for now.
selAck := selectiveAckBitmask(make([]byte, 8))
for i := 1; i < 65; i++ {
if len(c.inbound) <= i {
break
}
if c.inbound[i].seen {
selAck.SetBit(i - 1)
}
}
h := header{
Type: _type,
Version: 1,
ConnID: connID,
SeqNr: seqNr,
AckNr: c.ack_nr,
WndSize: c.wndSize(),
Timestamp: c.timestamp(),
TimestampDiff: c.lastTimeDiff,
// Currently always send an 8 byte selective ack.
Extensions: []extensionField{{
Type: extensionTypeSelectiveAck,
Bytes: selAck,
}},
}
p := h.Marshal()
// Extension headers are currently fixed in size.
if len(p) != maxHeaderSize {
panic("header has unexpected size")
}
p = append(p, payload...)
if logLevel >= 1 {
log.Printf("writing utp msg to %s: %s", c.remoteAddr, packetDebugString(&h, payload))
}
n1, err := c.socket.writeTo(p, c.remoteAddr)
if err != nil {
return
}
if n1 != len(p) {
panic(n1)
}
c.unpendSendState()
return
}
func (me *Conn) unpendSendState() {
me.pendingSendState = false
}
func (c *Conn) pendSendState() {
c.pendingSendState = true
}
func (me *Socket) writeTo(b []byte, addr net.Addr) (n int, err error) {
mu.RLock()
apdc := artificialPacketDropChance
mu.RUnlock()
if apdc != 0 {
if rand.Float64() < apdc {
n = len(b)
return
}
}
n, err = me.pc.WriteTo(b, addr)
return
}
func (s *send) timeoutResend() {
if time.Since(s.started) >= writeTimeout {
s.timedOut()
return
}
s.conn.mu.Lock()
defer s.conn.mu.Unlock()
if s.acked || s.conn.closed {
return
}
rt := s.conn.resendTimeout()
go s.resend()
s.numResends++
s.resendTimer.Reset(rt * time.Duration(s.numResends))
}
func (me *Conn) writeSyn() {
if me.sentSyn {
panic("already sent syn")
}
me.write(stSyn, me.recv_id, nil, me.seq_nr)
return
}
func (c *Conn) write(_type st, connID uint16, payload []byte, seqNr uint16) (n int) {
switch _type {
case stSyn, stFin, stData:
default:
panic(_type)
}
if c.wroteFin {
panic("can't write after fin")
}
if len(payload) > maxPayloadSize {
payload = payload[:maxPayloadSize]
}
err := c.send(_type, connID, payload, seqNr)
if err != nil {
c.destroy(fmt.Errorf("error sending packet: %s", err))
return
}
n = len(payload)
// Copy the payload so the caller of Write can continue to use the buffer.
if payload != nil {
payload = append(sendBufferPool.Get().([]byte)[:0:minMTU], payload...)
}
send := &send{
payloadSize: uint32(len(payload)),
started: time.Now(),
resend: func() {
c.mu.Lock()
err := c.send(_type, connID, payload, seqNr)
if err != nil {
log.Printf("error resending packet: %s", err)
}
c.mu.Unlock()
},
timedOut: func() {
c.mu.Lock()
c.destroy(errAckTimeout)
c.mu.Unlock()
},
conn: c,
}
send.resendTimer = time.AfterFunc(c.resendTimeout(), send.timeoutResend)
c.unackedSends = append(c.unackedSends, send)
c.cur_window += send.payloadSize
c.seq_nr++
return
}
func (c *Conn) latency() (ret time.Duration) {
if len(c.latencies) == 0 {
return initialLatency
}
for _, l := range c.latencies {
ret += l
}
ret = (ret + time.Duration(len(c.latencies)) - 1) / time.Duration(len(c.latencies))
return
}
func (c *Conn) numUnackedSends() (num int) {
for _, s := range c.unackedSends {
if !s.acked {
num++
}
}
return
}
func (c *Conn) sendState() {
c.send(stState, c.send_id, nil, c.seq_nr)
sentStatePackets.Add(1)
}
func (c *Conn) sendReset() {
c.send(stReset, c.send_id, nil, c.seq_nr)
}
func seqLess(a, b uint16) bool {
if b < 0x8000 {
return a < b || a >= b-0x8000
} else {
return a < b && a >= b-0x8000
}
}
// Ack our send with the given sequence number.
func (c *Conn) ack(nr uint16) {
if !seqLess(c.lastAck, nr) {
// Already acked.
return
}
i := nr - c.lastAck - 1
if int(i) >= len(c.unackedSends) {
log.Printf("got ack ahead of syn (%x > %x)", nr, c.seq_nr-1)
return
}
s := c.unackedSends[i]
latency, first := s.Ack()
if first {
c.cur_window -= s.payloadSize
c.latencies = append(c.latencies, latency)
if len(c.latencies) > 10 {
c.latencies = c.latencies[len(c.latencies)-10:]
}
}
for {
if len(c.unackedSends) == 0 {
break
}
if !c.unackedSends[0].acked {
// Can't trim unacked sends any further.
return
}
// Trim the front of the unacked sends.
c.unackedSends = c.unackedSends[1:]
c.lastAck++
}
c.event.Broadcast()
}
func (c *Conn) ackTo(nr uint16) {
if !seqLess(nr, c.seq_nr) {
return
}
for seqLess(c.lastAck, nr) {
c.ack(c.lastAck + 1)
}
}
type selectiveAckBitmask []byte
func (me selectiveAckBitmask) NumBits() int {
return len(me) * 8
}
func (me selectiveAckBitmask) SetBit(index int) {
me[index/8] |= 1 << uint(index%8)
}
func (me selectiveAckBitmask) BitIsSet(index int) bool {
return me[index/8]>>uint(index%8)&1 == 1
}
// Return the send state for the sequence number. Returns nil if there's no
// outstanding send for that sequence number.
func (c *Conn) seqSend(seqNr uint16) *send {
if !seqLess(c.lastAck, seqNr) {
// Presumably already acked.
return nil
}
i := int(seqNr - c.lastAck - 1)
if i >= len(c.unackedSends) {
// No such send.
return nil
}
return c.unackedSends[i]
}
func (c *Conn) resendTimeout() time.Duration {
l := c.latency()
ret := jitter.Duration(3*l, l)
return ret
}
func (c *Conn) ackSkipped(seqNr uint16) {
send := c.seqSend(seqNr)
if send == nil {
return
}
send.acksSkipped++
switch send.acksSkipped {
case 3, 60:
ackSkippedResends.Add(1)
go send.resend()
send.resendTimer.Reset(c.resendTimeout() * time.Duration(send.numResends))
default:
}
}
type packet struct {
h header
payload []byte
}
func (c *Conn) deliver(h header, payload []byte) {
c.packetsIn <- packet{h, payload}
}
func (c *Conn) deliveryProcessor() {
timeout := time.NewTimer(math.MaxInt64)
for {
timeout.Reset(packetReadTimeout)
select {
case p, ok := <-c.packetsIn:
if !ok {
return
}
c.processDelivery(p.h, p.payload)
timeout := time.After(500 * time.Microsecond)
batched:
for {
select {
case p, ok := <-c.packetsIn:
if !ok {
break batched
}
c.processDelivery(p.h, p.payload)
case <-timeout:
break batched
}
}
c.mu.Lock()
c.sendPendingState()
c.mu.Unlock()
case <-timeout.C:
c.mu.Lock()
c.destroy(errors.New("no packet read timeout"))
c.mu.Unlock()
}
}
}
func (c *Conn) updateStates() {
if c.wroteFin && len(c.unackedSends) <= 1 && c.gotFin {
c.closed = true
c.event.Broadcast()
}
}
func (c *Conn) processDelivery(h header, payload []byte) {
deliveriesProcessed.Add(1)
c.mu.Lock()
defer c.mu.Unlock()
defer c.updateStates()
defer c.event.Broadcast()
c.assertHeader(h)
c.peerWndSize = h.WndSize
c.applyAcks(h)
if h.Timestamp == 0 {
c.lastTimeDiff = 0
} else {
c.lastTimeDiff = c.timestamp() - h.Timestamp
}
if h.Type == stReset {
c.destroy(errors.New("peer reset"))
return
}
if !c.synAcked {
if h.Type != stState {
return
}
c.synAcked = true
c.ack_nr = h.SeqNr - 1
return
}
if h.Type == stState {
return
}
c.pendSendState()
if !seqLess(c.ack_nr, h.SeqNr) {
// Already received this packet.
return
}
inboundIndex := int(h.SeqNr - c.ack_nr - 1)
if inboundIndex < len(c.inbound) && c.inbound[inboundIndex].seen {
// Already received this packet.
return
}
// Derived from running in production:
// grep -oP '(?<=packet out of order, index=)\d+' log | sort -n | uniq -c
// 64 should correspond to 8 bytes of selective ack.
if inboundIndex >= maxUnackedInbound {
// Discard packet too far ahead.
if logLevel >= 1 {
log.Printf("received packet from %s %d ahead of next seqnr (%x > %x)", c.remoteAddr, inboundIndex, h.SeqNr, c.ack_nr+1)
}
return
}
// Extend inbound so the new packet has a place.
for inboundIndex >= len(c.inbound) {
c.inbound = append(c.inbound, recv{})
}
c.inbound[inboundIndex] = recv{true, payload, h.Type}
c.inboundWnd += uint32(len(payload))
c.processInbound()
}
func (c *Conn) applyAcks(h header) {
c.ackTo(h.AckNr)
for _, ext := range h.Extensions {
switch ext.Type {
case extensionTypeSelectiveAck:
c.ackSkipped(h.AckNr + 1)
bitmask := selectiveAckBitmask(ext.Bytes)
for i := 0; i < bitmask.NumBits(); i++ {
if bitmask.BitIsSet(i) {
nr := h.AckNr + 2 + uint16(i)
// log.Printf("selectively acked %d", nr)
c.ack(nr)
} else {
c.ackSkipped(h.AckNr + 2 + uint16(i))
}
}
}
}
}
func (c *Conn) assertHeader(h header) {
if h.Type == stSyn {
if h.ConnID != c.send_id {
panic(fmt.Sprintf("%d != %d", h.ConnID, c.send_id))
}
} else {
if h.ConnID != c.recv_id {
panic("erroneous delivery")
}
}
}
func (c *Conn) processInbound() {
// Consume consecutive next packets.
for !c.gotFin && len(c.inbound) > 0 && c.inbound[0].seen {
c.ack_nr++
p := c.inbound[0]
c.inbound = c.inbound[1:]
c.inboundWnd -= uint32(len(p.data))
c.readBuf = append(c.readBuf, p.data...)
if p.Type == stFin {
c.gotFin = true
}
}
}
func (c *Conn) waitAck(seq uint16) {
send := c.seqSend(seq)
if send == nil {
return
}
for !send.acked && !c.closed {
c.event.Wait()
}
return
}
func (c *Conn) connect() (err error) {
c.mu.Lock()
defer c.mu.Unlock()
c.seq_nr = 1
c.writeSyn()
c.sentSyn = true
if logLevel >= 2 {
log.Printf("sent syn")
}
// c.seq_nr++
c.waitAck(1)
if c.err != nil {
err = c.err
}
c.synAcked = true
c.event.Broadcast()
return err
}
func (s *Socket) detacher(c *Conn, key connKey) {
c.mu.Lock()
for !c.closed {
c.event.Wait()
}
c.mu.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
if s.conns[key] != c {
panic("conn changed")
}
delete(s.conns, key)
close(c.packetsIn)
s.event.Broadcast()
if s.closing {
s.teardown()
}
}
// Returns true if the connection was newly registered, false otherwise.
func (s *Socket) registerConn(recvID uint16, remoteAddr resolvedAddrStr, c *Conn) bool {
if s.conns == nil {
s.conns = make(map[connKey]*Conn)
}
key := connKey{remoteAddr, recvID}
if _, ok := s.conns[key]; ok {
return false
}
s.conns[key] = c
go s.detacher(c, key)
return true
}
func (s *Socket) nextSyn() (syn syn, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
for {
if s.closing {
return
}
for k := range s.backlog {
syn = k
delete(s.backlog, k)
ok = true
return
}
s.event.Wait()
}
}
// Accept and return a new uTP connection.
func (s *Socket) Accept() (c net.Conn, err error) {
for {
syn, ok := s.nextSyn()
if !ok {
err = errClosed
return
}
s.mu.Lock()
_c := s.newConn(stringAddr(syn.addr))
_c.send_id = syn.conn_id
_c.recv_id = _c.send_id + 1
_c.seq_nr = uint16(rand.Int())
_c.lastAck = _c.seq_nr - 1
_c.ack_nr = syn.seq_nr
_c.sentSyn = true
_c.synAcked = true
if !s.registerConn(_c.recv_id, resolvedAddrStr(syn.addr), _c) {
// SYN that triggered this accept duplicates existing connection.
// Ack again in case the SYN was a resend.
_c = s.conns[connKey{resolvedAddrStr(syn.addr), _c.recv_id}]
if _c.send_id != syn.conn_id {
panic(":|")
}
_c.sendState()
s.mu.Unlock()
continue
}
_c.sendState()
// _c.seq_nr++
c = _c
s.mu.Unlock()
return
}
}
// The address we're listening on for new uTP connections.
func (s *Socket) Addr() net.Addr {
return s.pc.LocalAddr()
}
func (s *Socket) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
s.closing = true
s.event.Broadcast()
return s.teardown()
}
func (s *Socket) teardown() (err error) {
if len(s.conns) == 0 {
s.event.Broadcast()
err = s.pc.Close()
}
return
}
func (s *Socket) LocalAddr() net.Addr {
return s.pc.LocalAddr()
}
func (s *Socket) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
read, ok := <-s.unusedReads
if !ok {
err = io.EOF
}
n = copy(p, read.data)
addr = read.from
return
}
func (s *Socket) WriteTo(b []byte, addr net.Addr) (int, error) {
return s.pc.WriteTo(b, addr)
}
func (c *Conn) writeFin() {
if c.wroteFin {
return
}
c.write(stFin, c.send_id, nil, c.seq_nr)
c.wroteFin = true
c.event.Broadcast()
return
}
func (c *Conn) destroy(reason error) {
if c.closed {
return
}
c.closed = true
c.event.Broadcast()
c.err = reason
}
func (c *Conn) Close() (err error) {
c.mu.Lock()
defer c.mu.Unlock()
c.closing = true
c.event.Broadcast()
c.writeFin()
for {
if c.wroteFin && len(c.unackedSends) <= 1 {
// Sent FIN and it's the only thing unacked.
break
}
if c.closed {
err = c.err
break
}
c.event.Wait()
}
return
}
func (c *Conn) LocalAddr() net.Addr {
return c.socket.Addr()
}
func (c *Conn) Read(b []byte) (n int, err error) {
c.mu.Lock()
defer c.mu.Unlock()
for {
if len(c.readBuf) != 0 {
break
}
if c.gotFin {
err = io.EOF
return
}
if c.closed {
if c.err == nil {
panic("closed without receiving fin, and no error")
}
err = c.err
return
}
if c.connDeadlines.read.deadlineExceeded() {
err = errTimeout
return
}
c.event.Wait()
}
n = copy(b, c.readBuf)
c.readBuf = c.readBuf[n:]
return
}
func (c *Conn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *Conn) String() string {
return fmt.Sprintf("<UTPConn %s-%s (%d)>", c.LocalAddr(), c.RemoteAddr(), c.recv_id)
}
func (c *Conn) Write(p []byte) (n int, err error) {
c.mu.Lock()
defer c.mu.Unlock()
for len(p) != 0 {
for {
if c.wroteFin || c.gotFin {
err = io.ErrClosedPipe
return
}
if c.connDeadlines.write.deadlineExceeded() {
err = errTimeout
return
}
// If peerWndSize is 0, we still want to send something, so don't
// block until we exceed it.
if c.synAcked &&
len(c.unackedSends) < maxUnackedSends &&
c.cur_window <= c.peerWndSize {
break
}
c.event.Wait()
}
var n1 int
n1 = c.write(stData, c.send_id, p, c.seq_nr)
// c.seq_nr++
n += n1
p = p[n1:]
}
return
}
package utp
import (
"fmt"
"io"
"io/ioutil"
"log"
"net"
"runtime"
"sync"
"testing"
"time"
_ "QmWsa476RGjb9scWzcRVts3QZsYjU5Kt6Y9qe8Q3vc5FHR/envpprof"
"github.com/anacrolix/missinggo"
"github.com/bradfitz/iter"
"github.com/stretchr/testify/require"
)
func init() {
log.SetFlags(log.Flags() | log.Lshortfile)
writeTimeout = 1 * time.Second
initialLatency = 10 * time.Millisecond
packetReadTimeout = 2 * time.Second
}
func TestUTPPingPong(t *testing.T) {
defer goroutineLeakCheck(t)()
s, err := NewSocket("udp", "localhost:0")
require.NoError(t, err)
defer s.Close()
pingerClosed := make(chan struct{})
go func() {
defer close(pingerClosed)
b, err := Dial(s.Addr().String())
require.NoError(t, err)
defer b.Close()
n, err := b.Write([]byte("ping"))
require.NoError(t, err)
require.EqualValues(t, 4, n)
buf := make([]byte, 4)
b.Read(buf)
require.EqualValues(t, "pong", buf)
log.Printf("got pong")
}()
a, err := s.Accept()
require.NoError(t, err)
defer a.Close()
log.Printf("accepted %s", a)
buf := make([]byte, 42)
n, err := a.Read(buf)
require.NoError(t, err)
require.EqualValues(t, "ping", buf[:n])
log.Print("got ping")
n, err = a.Write([]byte("pong"))
require.NoError(t, err)
require.Equal(t, 4, n)
log.Print("waiting for pinger to close")
<-pingerClosed
}
func goroutineLeakCheck(t testing.TB) func() {
if !testing.Verbose() {
return func() {}
}
numStart := runtime.NumGoroutine()
return func() {
var numNow int
for range iter.N(1) {
numNow = runtime.NumGoroutine()
if numNow == numStart {
return
}
time.Sleep(10 * time.Millisecond)
}
// I'd print stacks, or treat this as fatal, but I think
// runtime.NumGoroutine is counting system goroutines for which we are
// not provided the stacks, and which are spawned unpredictably.
t.Logf("have %d goroutines, started with %d", numNow, numStart)
}
}
func TestDialTimeout(t *testing.T) {
defer goroutineLeakCheck(t)()
s, _ := NewSocket("udp", "localhost:0")
defer s.Close()
conn, err := DialTimeout(s.Addr().String(), 10*time.Millisecond)
if err == nil {
conn.Close()
t.Fatal("expected timeout")
}
t.Log(err)
}
func TestMinMaxHeaderType(t *testing.T) {
require.Equal(t, stSyn, stMax)
}
func TestUTPRawConn(t *testing.T) {
l, err := NewSocket("udp", "")
require.NoError(t, err)
defer l.Close()
go func() {
for {
_, err := l.Accept()
if err != nil {
break
}
}
}()
// Connect a UTP peer to see if the RawConn will still work.
log.Print("dialing")
utpPeer := func() net.Conn {
s, _ := NewSocket("udp", "")
defer s.Close()
ret, err := s.Dial(fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
require.NoError(t, err)
return ret
}()
log.Print("dial returned")
if err != nil {
t.Fatalf("error dialing utp listener: %s", err)
}
defer utpPeer.Close()
peer, err := net.ListenPacket("udp", ":0")
if err != nil {
t.Fatal(err)
}
defer peer.Close()
msgsReceived := 0
const N = 5000 // How many messages to send.
readerStopped := make(chan struct{})
// The reader goroutine.
go func() {
defer close(readerStopped)
b := make([]byte, 500)
for i := 0; i < N; i++ {
n, _, err := l.ReadFrom(b)
if err != nil {
t.Fatalf("error reading from raw conn: %s", err)
}
msgsReceived++
var d int
fmt.Sscan(string(b[:n]), &d)
if d != i {
log.Printf("got wrong number: expected %d, got %d", i, d)
}
}
}()
udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("localhost:%d", missinggo.AddrPort(l.Addr())))
if err != nil {
t.Fatal(err)
}
for i := 0; i < N; i++ {
_, err := peer.WriteTo([]byte(fmt.Sprintf("%d", i)), udpAddr)
if err != nil {
t.Fatal(err)
}
time.Sleep(10 * time.Microsecond)
}
select {
case <-readerStopped:
case <-time.After(time.Second):
t.Fatal("reader timed out")
}
if msgsReceived != N {
t.Fatalf("messages received: %d", msgsReceived)
}
}
func TestConnReadDeadline(t *testing.T) {
t.Parallel()
ls, _ := NewSocket("udp", "localhost:0")
ds, _ := NewSocket("udp", "localhost:0")
dcReadErr := make(chan error)
go func() {
c, _ := ds.Dial(ls.Addr().String())
defer c.Close()
_, err := c.Read(nil)
dcReadErr <- err
}()
c, _ := ls.Accept()
dl := time.Now().Add(time.Millisecond)
c.SetReadDeadline(dl)
_, err := c.Read(nil)
require.Equal(t, errTimeout, err)
// The deadline has passed.
if !time.Now().After(dl) {
t.FailNow()
}
// Returns timeout on subsequent read.
_, err = c.Read(nil)
require.Equal(t, errTimeout, err)
// Disable the deadline.
c.SetReadDeadline(time.Time{})
readReturned := make(chan struct{})
go func() {
c.Read(nil)
close(readReturned)
}()
select {
case <-readReturned:
// Read returned but shouldn't have.
t.FailNow()
case <-time.After(time.Millisecond):
}
c.Close()
if err := <-dcReadErr; err != io.EOF {
t.Fatalf("dial conn read returned %s", err)
}
select {
case <-readReturned:
case <-time.After(time.Millisecond):
t.Fatal("read should return after Conn is closed")
}
}
func connectSelfLots(n int, t testing.TB) {
defer goroutineLeakCheck(t)()
s, err := NewSocket("udp", "localhost:0")
if err != nil {
t.Fatal(err)
}
go func() {
for range iter.N(n) {
c, err := s.Accept()
if err != nil {
log.Fatal(err)
}
defer c.Close()
}
}()
dialErr := make(chan error)
connCh := make(chan net.Conn)
dialSema := make(chan struct{}, backlog)
for range iter.N(n) {
go func() {
dialSema <- struct{}{}
c, err := s.Dial(s.Addr().String())
<-dialSema
if err != nil {
dialErr <- err
return
}
connCh <- c
}()
}
conns := make([]net.Conn, 0, n)
for range iter.N(n) {
select {
case c := <-connCh:
conns = append(conns, c)
case err := <-dialErr:
t.Fatal(err)
}
}
for _, c := range conns {
if c != nil {
c.Close()
}
}
s.mu.Lock()
for len(s.conns) != 0 {
// log.Print(len(s.conns))
s.event.Wait()
}
s.mu.Unlock()
s.Close()
}
// Connect to ourself heaps.
func TestConnectSelf(t *testing.T) {
t.Parallel()
// A rough guess says that at worst, I can only have 0x10000/3 connections
// to the same socket, due to fragmentation in the assigned connection
// IDs.
connectSelfLots(0x1000, t)
}
func BenchmarkConnectSelf(b *testing.B) {
for range iter.N(b.N) {
connectSelfLots(2, b)
}
}
func BenchmarkNewCloseSocket(b *testing.B) {
for range iter.N(b.N) {
s, err := NewSocket("udp", "localhost:0")
if err != nil {
b.Fatal(err)
}
err = s.Close()
if err != nil {
b.Fatal(err)
}
}
}
func TestRejectDialBacklogFilled(t *testing.T) {
s, err := NewSocket("udp", "localhost:0")
if err != nil {
t.Fatal(err)
}
errChan := make(chan error, 1)
dial := func() {
_, err := s.Dial(s.Addr().String())
if err != nil {
errChan <- err
}
}
// Fill the backlog.
for range iter.N(backlog + 1) {
go dial()
}
s.mu.Lock()
for len(s.backlog) < backlog {
s.event.Wait()
}
s.mu.Unlock()
select {
case <-errChan:
t.FailNow()
default:
}
// One more connection should cause a dial attempt to get reset.
go dial()
err = <-errChan
if err.Error() != "peer reset" {
t.FailNow()
}
s.Close()
}
// Make sure that we can reset AfterFunc timers, so we don't have to create
// brand new ones every time they fire. Specifically for the Conn resend timer.
func TestResetAfterFuncTimer(t *testing.T) {
t.Parallel()
fired := make(chan struct{})
timer := time.AfterFunc(time.Millisecond, func() {
fired <- struct{}{}
})
<-fired
if timer.Reset(time.Millisecond) {
// The timer should have expired
t.FailNow()
}
<-fired
}
func connPair() (initer, accepted net.Conn) {
s, err := NewSocket("udp", "localhost:0")
if err != nil {
panic(err)
}
defer s.Close()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var err error
initer, err = Dial(s.Addr().String())
if err != nil {
panic(err)
}
}()
accepted, err = s.Accept()
if err != nil {
panic(err)
}
wg.Wait()
return
}
// Check that a peer sending FIN doesn't cause unread data to be dropped in a
// receiver.
func TestReadFinishedConn(t *testing.T) {
a, b := connPair()
defer a.Close()
defer b.Close()
mu.Lock()
originalAPDC := artificialPacketDropChance
artificialPacketDropChance = 1
mu.Unlock()
n, err := a.Write([]byte("hello"))
require.Equal(t, 5, n)
require.NoError(t, err)
n, err = a.Write([]byte("world"))
require.Equal(t, 5, n)
require.NoError(t, err)
mu.Lock()
artificialPacketDropChance = originalAPDC
mu.Unlock()
a.Close()
all, err := ioutil.ReadAll(b)
require.NoError(t, err)
require.EqualValues(t, "helloworld", all)
}
func TestCloseDetachesQuickly(t *testing.T) {
t.Parallel()
s, _ := NewSocket("udp", "localhost:0")
defer s.Close()
go func() {
a, _ := s.Dial(s.Addr().String())
log.Print("close a")
a.Close()
log.Print("closed a")
}()
b, _ := s.Accept()
b.Close()
s.mu.Lock()
for len(s.conns) != 0 {
log.Print(len(s.conns))
s.event.Wait()
}
s.mu.Unlock()
}
// Check that closing, and the resulting detach of, a Conn doesn't close the parent
// Socket. We Accept, then close the connection and ensure it's detached. Then
// Accept again to check the Socket is still functional and unclosed.
func TestConnCloseUnclosedSocket(t *testing.T) {
t.Parallel()
s, err := NewSocket("udp", "localhost:0")
require.NoError(t, err)
defer func() {
require.NoError(t, s.Close())
}()
// Prevents the dialing goroutine from closing its end of the Conn before
// we can check that it has been registered in the listener.
dialerSync := make(chan struct{})
go func() {
for range iter.N(2) {
c, err := Dial(s.Addr().String())
require.NoError(t, err)
<-dialerSync
err = c.Close()
require.NoError(t, err)
}
}()
for range iter.N(2) {
a, err := s.Accept()
require.NoError(t, err)
// We do this in a closure because we need to unlock Server.mu if the
// test failure exception is thrown. "Do as we say, not as we do" -Go
// team.
func() {
s.mu.Lock()
defer s.mu.Unlock()
require.Len(t, s.conns, 1)
}()
dialerSync <- struct{}{}
require.NoError(t, a.Close())
func() {
s.mu.Lock()
defer s.mu.Unlock()
for len(s.conns) != 0 {
s.event.Wait()
}
}()
}
}
func TestAcceptGone(t *testing.T) {
s, _ := NewSocket("udp", "localhost:0")
_, err := DialTimeout(s.Addr().String(), time.Millisecond)
require.Error(t, err)
c, _ := s.Accept()
c.SetReadDeadline(time.Now().Add(time.Millisecond))
c.Read(nil)
// select {}
}
func TestPacketReadTimeout(t *testing.T) {
t.Parallel()
a, b := connPair()
_, err := a.Read(nil)
require.Contains(t, err.Error(), "timeout")
t.Log(err)
t.Log(a.Close())
t.Log(b.Close())
}
The MIT License (MIT)
Copyright (c) 2015 Jeromy Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# go-multiaddr-filter -- CIDR netmasks with multiaddr
This module creates very simple [multiaddr](https://github.com/jbenet/go-multiaddr) formatted cidr netmasks.
It doesn't do full multiaddr parsing, to avoid extra vendored dependencies and for performance. The `net` package will take care of verifying the validity of the network part anyway.
## Usage
```go
import filter "github.com/whyrusleeping/multiaddr-filter"
filter.NewMask("/ip4/192.168.0.0/24") // ipv4
filter.NewMask("/ip6/fe80::/64") // ipv6
```
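
A mask returned by `NewMask` is a plain `*net.IPNet`, so it can be used directly with `Contains`, and `ConvertIPNet` converts back to the multiaddr form. A small sketch of a round trip, assuming the import above:

```go
m, err := filter.NewMask("/ip4/10.0.0.0/ipcidr/8")
if err != nil {
	// handle the invalid mask
}
m.Contains(net.ParseIP("10.3.3.4")) // true

s, _ := filter.ConvertIPNet(m) // "/ip4/10.0.0.0/ipcidr/8"
```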
package mask
import (
"errors"
"fmt"
"net"
"strings"
manet "QmanZCL6SXRfafiUEMCBLq2QR171uQSdXQ8YAdHXLd8Cwr/go-multiaddr-net"
)
var ErrInvalidFormat = errors.New("invalid multiaddr-filter format")
func NewMask(a string) (*net.IPNet, error) {
parts := strings.Split(a, "/")
if parts[0] != "" {
return nil, ErrInvalidFormat
}
if len(parts) != 5 {
return nil, ErrInvalidFormat
}
// check it's a valid filter address. ip + cidr
isip := parts[1] == "ip4" || parts[1] == "ip6"
iscidr := parts[3] == "ipcidr"
if !isip || !iscidr {
return nil, ErrInvalidFormat
}
_, ipn, err := net.ParseCIDR(parts[2] + "/" + parts[4])
if err != nil {
return nil, err
}
return ipn, nil
}
func ConvertIPNet(n *net.IPNet) (string, error) {
addr, err := manet.FromIP(n.IP)
if err != nil {
return "", err
}
b, _ := n.Mask.Size()
return fmt.Sprintf("%s/ipcidr/%d", addr, b), nil
}
package mask
import (
"net"
"testing"
)
func TestValidMasks(t *testing.T) {
cidrOrFatal := func(s string) *net.IPNet {
_, ipn, err := net.ParseCIDR(s)
if err != nil {
t.Fatal(err)
}
return ipn
}
testCases := map[string]*net.IPNet{
"/ip4/1.2.3.4/ipcidr/0": cidrOrFatal("1.2.3.4/0"),
"/ip4/1.2.3.4/ipcidr/32": cidrOrFatal("1.2.3.4/32"),
"/ip4/1.2.3.4/ipcidr/24": cidrOrFatal("1.2.3.4/24"),
"/ip4/192.168.0.0/ipcidr/28": cidrOrFatal("192.168.0.0/28"),
"/ip6/fe80::/ipcidr/0": cidrOrFatal("fe80::/0"),
"/ip6/fe80::/ipcidr/64": cidrOrFatal("fe80::/64"),
"/ip6/fe80::/ipcidr/128": cidrOrFatal("fe80::/128"),
}
for s, m1 := range testCases {
m2, err := NewMask(s)
if err != nil {
t.Error("should be invalid:", s)
continue
}
if m1.String() != m2.String() {
t.Error("masks not equal:", m1, m2)
}
}
}
func TestInvalidMasks(t *testing.T) {
testCases := []string{
"/",
"/ip4/10.1.2.3",
"/ip6/::",
"/ip4/1.2.3.4/cidr/24",
"/ip6/fe80::/cidr/24",
"/eth/aa:aa:aa:aa:aa/ipcidr/24",
"foobar/ip4/1.2.3.4/ipcidr/32",
}
for _, s := range testCases {
_, err := NewMask(s)
if err != ErrInvalidFormat {
t.Error("should be invalid:", s)
}
}
testCases2 := []string{
"/ip4/1.2.3.4/ipcidr/33",
"/ip4/192.168.0.0/ipcidr/-1",
"/ip6/fe80::/ipcidr/129",
}
for _, s := range testCases2 {
_, err := NewMask(s)
if err == nil {
t.Error("should be invalid:", s)
}
}
}
func TestFiltered(t *testing.T) {
var tests = map[string]map[string]bool{
"/ip4/10.0.0.0/ipcidr/8": map[string]bool{
"10.3.3.4": true,
"10.3.4.4": true,
"10.4.4.4": true,
"15.52.34.3": false,
},
"/ip4/192.168.0.0/ipcidr/16": map[string]bool{
"192.168.0.0": true,
"192.168.1.0": true,
"192.1.0.0": false,
"10.4.4.4": false,
},
}
for mask, set := range tests {
m, err := NewMask(mask)
if err != nil {
t.Fatal(err)
}
for addr, val := range set {
ip := net.ParseIP(addr)
if m.Contains(ip) != val {
t.Fatalf("expected contains(%s, %s) == %s", mask, addr, val)
}
}
}
}
func TestParsing(t *testing.T) {
var addrs = map[string]string{
"/ip4/192.168.0.0/ipcidr/16": "192.168.0.0/16",
"/ip4/192.0.0.0/ipcidr/8": "192.0.0.0/8",
"/ip6/2001:db8::/ipcidr/32": "2001:db8::/32",
}
for k, v := range addrs {
m, err := NewMask(k)
if err != nil {
t.Fatal(err)
}
if m.String() != v {
t.Fatalf("mask is wrong: ", m, v)
}
orig, err := ConvertIPNet(m)
if err != nil {
t.Fatal(err)
}
if orig != k {
t.Fatal("backwards conversion failed: ", orig, k)
}
}
}
{
"name": "multiaddr-filter",
"author": "whyrusleeping",
"version": "1.0.0",
"gxDependencies": [
{
"name": "go-multiaddr-net",
"hash": "QmanZCL6SXRfafiUEMCBLq2QR171uQSdXQ8YAdHXLd8Cwr",
"version": "1.0.0"
}
],
"language": "go",
"gx": {
"dvcsimport": "github.com/whyrusleeping/multiaddr-filter"
}
}
package envpprof
import (
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
)
var (
pprofDir = filepath.Join(os.Getenv("HOME"), "pprof")
heap bool
)
func writeHeapProfile() {
os.Mkdir(pprofDir, 0750)
f, err := ioutil.TempFile(pprofDir, "heap")
if err != nil {
log.Printf("error creating heap profile file: %s", err)
return
}
defer f.Close()
pprof.WriteHeapProfile(f)
log.Printf("wrote heap profile to %q", f.Name())
}
func Stop() {
pprof.StopCPUProfile()
if heap {
writeHeapProfile()
}
}
func init() {
for _, item := range strings.Split(os.Getenv("GOPPROF"), ",") {
equalsPos := strings.IndexByte(item, '=')
var key, value string
if equalsPos < 0 {
key = item
} else {
key = item[:equalsPos]
value = item[equalsPos+1:]
}
if value != "" {
log.Printf("values not yet supported")
}
switch key {
case "http":
go func() {
var l net.Listener
for port := uint16(6061); port != 6060; port++ {
var err error
l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
if err == nil {
break
}
}
if l == nil {
log.Print("unable to create envpprof listener for http")
return
}
defer l.Close()
log.Printf("envpprof serving http://%s", l.Addr())
log.Printf("error serving http on envpprof listener: %s", http.Serve(l, nil))
}()
case "cpu":
os.Mkdir(pprofDir, 0750)
f, err := ioutil.TempFile(pprofDir, "cpu")
if err != nil {
log.Printf("error creating cpu pprof file: %s", err)
break
}
err = pprof.StartCPUProfile(f)
if err != nil {
log.Printf("error starting cpu profiling: %s", err)
break
}
log.Printf("cpu profiling to file %q", f.Name())
case "block":
runtime.SetBlockProfileRate(1)
case "heap":
heap = true
}
}
}
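// Usage sketch (not part of the original file): the package is configured
// entirely through the GOPPROF environment variable, e.g.
// GOPPROF=http,cpu,heap. Importing it starts the selected profiles from init;
// Stop should be deferred from main so CPU and heap profiles are flushed on
// exit:
//
//	import "github.com/anacrolix/envpprof"
//
//	func main() {
//		defer envpprof.Stop()
//		// ...
//	}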
{
"name": "envpprof",
"author": "whyrusleeping",
"version": "1.0.0",
"language": "go",
"gx": {
"dvcsimport": "github.com/anacrolix/envpprof"
}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// output is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3 // import "golang.org/x/crypto/sha3"
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
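// Illustrative sketch (not in the original file): each one-shot Sum helper
// above is equivalent to creating the corresponding hash.Hash, writing the
// data, and summing into a fixed-size array.
func exampleSum256Equivalence(data []byte) bool {
	oneShot := Sum256(data)
	h := New256()
	h.Write(data)
	var streamed [32]byte
	h.Sum(streamed[:0])
	return oneShot == streamed
}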
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
}
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[12] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[18] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[24] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[16] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[22] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[3] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[1] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[7] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[19] ^ d4
bc3 = t<<8 | t>>(64-8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[11] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[23] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[4] ^ d4
bc0 = t<<27 | t>>(64-27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[2] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[8] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[14] ^ d4
bc2 = t<<39 | t>>(64-39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[7] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[23] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[14] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[11] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[2] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[18] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[6] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[22] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[4] ^ d4
bc3 = t<<8 | t>>(64-8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[1] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[8] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[24] ^ d4
bc0 = t<<27 | t>>(64-27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[12] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[3] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[19] ^ d4
bc2 = t<<39 | t>>(64-39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[22] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[8] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[19] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[1] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[12] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[23] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[16] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[2] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[24] ^ d4
bc3 = t<<8 | t>>(64-8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[6] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[3] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[14] ^ d4
bc0 = t<<27 | t>>(64-27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[7] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[18] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[4] ^ d4
bc2 = t<<39 | t>>(64-39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[2] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[3] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[4] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[6] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[7] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[8] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[11] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[12] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[14] ^ d4
bc3 = t<<8 | t>>(64-8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[16] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[18] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[19] ^ d4
bc0 = t<<27 | t>>(64-27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[22] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[23] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[24] ^ d4
bc2 = t<<39 | t>>(64-39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}
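// Illustrative sketch only (not part of the vendored file): the bc/d
// computations repeated at the top of each unrolled round above are the
// Keccak theta step. Without the unrolling, and assuming the same row-major
// [25]uint64 lane layout (stride 5), it would look roughly like this:
func thetaReference(a *[25]uint64) {
	var bc [5]uint64
	// Column parities.
	for x := 0; x < 5; x++ {
		bc[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	// XOR each lane with the parity of its left neighbour column and the
	// rotated parity of its right neighbour column.
	for x := 0; x < 5; x++ {
		d := bc[(x+4)%5] ^ (bc[(x+1)%5]<<1 | bc[(x+1)%5]>>63)
		for y := 0; y < 25; y += 5 {
			a[y+x] ^= d
		}
	}
}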
{
"name": "crypto-sha3",
"author": "whyrusleeping",
"version": "1.0.0",
"language": "go",
"gx": {
"dvcsimport": "golang.org/x/crypto/sha3"
}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package sha3
import (
"crypto"
)
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-128
// currently needs the largest buffer, with a rate of 168 bytes.
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
buf []byte // points into storage
rate int // the number of bytes of state to use
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
// SHA-3 and SHAKE functions by appending bitstrings to the message.
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
// padding rule from section 5.1 is applied to pad the message to a multiple
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
// giving 00000110b (0x06) and 00011111b (0x1f).
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
// Extendable-Output Functions (May 2014)"
dsbyte byte
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
fixedOutput bool // whether this is a fixed-output-length instance
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
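// For concreteness (a sketch; the actual constructors live elsewhere in the
// package and are not shown here), the standard instances fill this struct
// roughly as follows, with rate = 200 - 2*outputLen for the fixed-output
// functions:
//
//	SHA3-224: &state{rate: 144, outputLen: 28, dsbyte: 0x06}
//	SHA3-256: &state{rate: 136, outputLen: 32, dsbyte: 0x06}
//	SHA3-384: &state{rate: 104, outputLen: 48, dsbyte: 0x06}
//	SHA3-512: &state{rate:  72, outputLen: 64, dsbyte: 0x06}
//	SHAKE128: &state{rate: 168, dsbyte: 0x1f}
//	SHAKE256: &state{rate: 136, dsbyte: 0x1f}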
// BlockSize returns the rate of the sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting the sponge direction back to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.buf = d.storage[:0]
}
func (d *state) clone() *state {
ret := *d
if ret.state == spongeAbsorbing {
ret.buf = ret.storage[:len(ret.buf)]
} else {
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
}
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.buf)
d.buf = d.storage[:0]
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
}
// padAndPermute appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute(dsbyte byte) {
if d.buf == nil {
d.buf = d.storage[:0]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.buf = append(d.buf, dsbyte)
zerosStart := len(d.buf)
d.buf = d.storage[:d.rate]
for i := zerosStart; i < d.rate; i++ {
d.buf[i] = 0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.buf[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
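// Worked example (illustrative only): for SHA3-256 (rate 136, dsbyte 0x06)
// absorbing the empty message, the single padded block produced here is
//
//	0x06, 0x00, ..., 0x00, 0x80   (136 bytes in total)
//
// i.e. dsbyte in the first byte and the final padding "1" bit as the MSB of
// the last byte, matching the comment on dsbyte in the state struct above.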
// Write absorbs more data into the hash's state. It panics if more data
// is written after output has been read from the ShakeHash, i.e. once the
// sponge has started squeezing.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
if d.buf == nil {
d.buf = d.storage[:0]
}
written = len(p)
for len(p) > 0 {
if len(d.buf) == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - len(d.buf)
if todo > len(p) {
todo = len(p)
}
d.buf = append(d.buf, p[:todo]...)
p = p[todo:]
// If the sponge is full, apply the permutation.
if len(d.buf) == d.rate {
d.permute()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute(d.dsbyte)
}
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.buf)
d.buf = d.buf[n:]
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if len(d.buf) == 0 {
d.permute()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func (d *state) Sum(in []byte) []byte {
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
dup.Read(hash)
return append(in, hash...)
}
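// Illustrative sketch only: because Sum operates on a clone, a caller can
// take an intermediate digest and keep writing (part1 and part2 being
// arbitrary byte slices):
//
//	h := New256()
//	h.Write(part1)
//	mid := h.Sum(nil)  // digest of part1
//	h.Write(part2)
//	all := h.Sum(nil)  // digest of part1 || part2; mid is unaffected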
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// Tests include all the ShortMsgKATs provided by the Keccak team at
// https://github.com/gvanas/KeccakCodePackage
//
// They only include the zero-bit case of the bitwise testvectors
// published by NIST in the draft of FIPS-202.
import (
"bytes"
"compress/flate"
"encoding/hex"
"encoding/json"
"hash"
"os"
"strings"
"testing"
)
const (
testString = "brekeccakkeccak koax koax"
katFilename = "testdata/keccakKats.json.deflate"
)
// Internal-use instances of SHAKE used to test against KATs.
func newHashShake128() hash.Hash {
return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
}
func newHashShake256() hash.Hash {
return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
}
// testDigests contains functions returning hash.Hash instances
// with output-length equal to the KAT length for both SHA-3 and
// SHAKE instances.
var testDigests = map[string]func() hash.Hash{
"SHA3-224": New224,
"SHA3-256": New256,
"SHA3-384": New384,
"SHA3-512": New512,
"SHAKE128": newHashShake128,
"SHAKE256": newHashShake256,
}
// testShakes contains functions that return ShakeHash instances for
// testing the ShakeHash-specific interface.
var testShakes = map[string]func() ShakeHash{
"SHAKE128": NewShake128,
"SHAKE256": NewShake256,
}
// decodeHex converts a hex-encoded string into its raw bytes.
func decodeHex(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return b
}
// structs used to marshal JSON test-cases.
type KeccakKats struct {
Kats map[string][]struct {
Digest string `json:"digest"`
Length int64 `json:"length"`
Message string `json:"message"`
}
}
func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
xorInOrig, copyOutOrig := xorIn, copyOut
xorIn, copyOut = xorInGeneric, copyOutGeneric
testf("generic")
if xorImplementationUnaligned != "generic" {
xorIn, copyOut = xorInUnaligned, copyOutUnaligned
testf("unaligned")
}
xorIn, copyOut = xorInOrig, copyOutOrig
}
// TestKeccakKats tests the SHA-3 and Shake implementations against all the
// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
// (The testvectors are stored in keccakKats.json.deflate due to their length.)
func TestKeccakKats(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
// Read the KATs.
deflated, err := os.Open(katFilename)
if err != nil {
t.Errorf("error opening %s: %s", katFilename, err)
}
file := flate.NewReader(deflated)
dec := json.NewDecoder(file)
var katSet KeccakKats
err = dec.Decode(&katSet)
if err != nil {
t.Errorf("error decoding KATs: %s", err)
}
// Do the KATs.
for functionName, kats := range katSet.Kats {
d := testDigests[functionName]()
for _, kat := range kats {
d.Reset()
in, err := hex.DecodeString(kat.Message)
if err != nil {
t.Errorf("error decoding KAT: %s", err)
}
d.Write(in[:kat.Length/8])
got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
if got != kat.Digest {
t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
functionName, impl, kat.Length, kat.Message, got, kat.Digest)
t.Logf("wanted %+v", kat)
t.FailNow()
}
continue
}
}
})
}
// TestUnalignedWrite tests writing data in an arbitrary pattern with
// small input buffers.
func TestUnalignedWrite(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
buf := sequentialBytes(0x10000)
for alg, df := range testDigests {
d := df()
d.Reset()
d.Write(buf)
want := d.Sum(nil)
d.Reset()
for i := 0; i < len(buf); {
// Cycle through offsets which make a 137 byte sequence.
// Because 137 is prime this sequence should exercise all corner cases.
offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
for _, j := range offsets {
if v := len(buf) - i; v < j {
j = v
}
d.Write(buf[i : i+j])
i += j
}
}
got := d.Sum(nil)
if !bytes.Equal(got, want) {
t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
}
}
})
}
// TestAppend checks that appending works when reallocation is necessary.
func TestAppend(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
d := New224()
for capacity := 2; capacity <= 66; capacity += 64 {
// The first time around the loop, Sum will have to reallocate.
// The second time, it will not.
buf := make([]byte, 2, capacity)
d.Reset()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("got %s, want %s", got, expected)
}
}
})
}
// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
func TestAppendNoRealloc(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
buf := make([]byte, 1, 200)
d := New224()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("%s: got %s, want %s", impl, got, expected)
}
})
}
// TestSqueezing checks that squeezing the full output a single time produces
// the same output as repeatedly squeezing the instance.
func TestSqueezing(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
for functionName, newShakeHash := range testShakes {
d0 := newShakeHash()
d0.Write([]byte(testString))
ref := make([]byte, 32)
d0.Read(ref)
d1 := newShakeHash()
d1.Write([]byte(testString))
var multiple []byte
for _ = range ref {
one := make([]byte, 1)
d1.Read(one)
multiple = append(multiple, one...)
}
if !bytes.Equal(ref, multiple) {
t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
}
}
})
}
// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
func sequentialBytes(size int) []byte {
result := make([]byte, size)
for i := range result {
result[i] = byte(i)
}
return result
}
// BenchmarkPermutationFunction measures the speed of the permutation function
// with no input data.
func BenchmarkPermutationFunction(b *testing.B) {
b.SetBytes(int64(200))
var lanes [25]uint64
for i := 0; i < b.N; i++ {
keccakF1600(&lanes)
}
}
// benchmarkHash tests the speed to hash num buffers of size bytes each.
func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
b.StopTimer()
h.Reset()
data := sequentialBytes(size)
b.SetBytes(int64(size * num))
b.StartTimer()
var state []byte
for i := 0; i < b.N; i++ {
for j := 0; j < num; j++ {
h.Write(data)
}
state = h.Sum(state[:0])
}
b.StopTimer()
h.Reset()
}
// benchmarkShake is specialized to the Shake instances, which don't
// require a copy on reading output.
func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
b.StopTimer()
h.Reset()
data := sequentialBytes(size)
d := make([]byte, 32)
b.SetBytes(int64(size * num))
b.StartTimer()
for i := 0; i < b.N; i++ {
h.Reset()
for j := 0; j < num; j++ {
h.Write(data)
}
h.Read(d)
}
}
func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
func Example_sum() {
buf := []byte("some data to hash")
// A hash needs to be 64 bytes long to have 256-bit collision resistance.
h := make([]byte, 64)
// Compute a 64-byte hash of buf and put it in h.
ShakeSum256(h, buf)
}
func Example_mac() {
k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
buf := []byte("and this is some data to authenticate")
// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
h := make([]byte, 32)
d := NewShake256()
// Write the key into the hash.
d.Write(k)
// Now write the data.
d.Write(buf)
// Read 32 bytes of output from the hash into h.
d.Read(h)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
import (
"io"
)
// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
}
func (d *state) Clone() ShakeHash {
return d.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}
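// Illustrative sketch only: because SHAKE is an extendable-output function,
// a shorter ShakeSum output is a prefix of a longer one for the same input
// (data being an arbitrary byte slice):
//
//	short := make([]byte, 32)
//	long := make([]byte, 64)
//	ShakeSum128(short, data)
//	ShakeSum128(long, data)
//	// bytes.Equal(short, long[:32]) == true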