Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
adam.huang
go-libp2p
Commits
ebd0a2df
Commit
ebd0a2df
authored
Nov 15, 2015
by
Jeromy
Browse files
remove multiple multihash deps
parent
4a64aae7
Changes
84
Show whitespace changes
Inline
Side-by-side
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/package.json
deleted
100644 → 0
View file @
4a64aae7
{
"name"
:
"crypto-sha3"
,
"author"
:
"whyrusleeping"
,
"version"
:
"1.0.0"
,
"language"
:
"go"
,
"gx"
:
{
"dvcsimport"
:
"golang.org/x/crypto/sha3"
}
}
\ No newline at end of file
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/register.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package
sha3
import
(
"crypto"
)
func
init
()
{
crypto
.
RegisterHash
(
crypto
.
SHA3_224
,
New224
)
crypto
.
RegisterHash
(
crypto
.
SHA3_256
,
New256
)
crypto
.
RegisterHash
(
crypto
.
SHA3_384
,
New384
)
crypto
.
RegisterHash
(
crypto
.
SHA3_512
,
New512
)
}
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/sha3.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package
sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int

const (
	// spongeAbsorbing indicates that the sponge is absorbing input.
	spongeAbsorbing spongeDirection = iota
	// spongeSqueezing indicates that the sponge is being squeezed.
	spongeSqueezing
)

const (
	// maxRate is the maximum size of the internal buffer. SHAKE-256
	// currently needs the largest buffer.
	maxRate = 168
)

// state is the Keccak sponge on which both the SHA-3 and SHAKE
// constructions are built.
type state struct {
	// Generic sponge components.
	a    [25]uint64 // main state of the hash
	buf  []byte     // points into storage
	rate int        // the number of bytes of state to use

	// dsbyte contains the "domain separation" bits and the first bit of
	// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
	// SHA-3 and SHAKE functions by appending bitstrings to the message.
	// Using a little-endian bit-ordering convention, these are "01" for SHA-3
	// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
	// padding rule from section 5.1 is applied to pad the message to a multiple
	// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
	// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
	// giving 00000110b (0x06) and 00011111b (0x1f).
	// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
	//     "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
	//      Extendable-Output Functions (May 2014)"
	dsbyte  byte
	storage [maxRate]byte

	// Specific to SHA-3 and SHAKE.
	fixedOutput bool            // whether this is a fixed-output-length instance
	outputLen   int             // the default output size in bytes
	state       spongeDirection // whether the sponge is absorbing or squeezing
}
// BlockSize returns the rate of sponge underlying this hash function.
func
(
d
*
state
)
BlockSize
()
int
{
return
d
.
rate
}
// Size returns the output size of the hash function in bytes.
func
(
d
*
state
)
Size
()
int
{
return
d
.
outputLen
}
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting Sponge.state to absorbing.
func
(
d
*
state
)
Reset
()
{
// Zero the permutation's state.
for
i
:=
range
d
.
a
{
d
.
a
[
i
]
=
0
}
d
.
state
=
spongeAbsorbing
d
.
buf
=
d
.
storage
[
:
0
]
}
func
(
d
*
state
)
clone
()
*
state
{
ret
:=
*
d
if
ret
.
state
==
spongeAbsorbing
{
ret
.
buf
=
ret
.
storage
[
:
len
(
ret
.
buf
)]
}
else
{
ret
.
buf
=
ret
.
storage
[
d
.
rate
-
cap
(
d
.
buf
)
:
d
.
rate
]
}
return
&
ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func
(
d
*
state
)
permute
()
{
switch
d
.
state
{
case
spongeAbsorbing
:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn
(
d
,
d
.
buf
)
d
.
buf
=
d
.
storage
[
:
0
]
keccakF1600
(
&
d
.
a
)
case
spongeSqueezing
:
// If we're squeezing, we need to apply the permutatin before
// copying more output.
keccakF1600
(
&
d
.
a
)
d
.
buf
=
d
.
storage
[
:
d
.
rate
]
copyOut
(
d
,
d
.
buf
)
}
}
// pads appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func
(
d
*
state
)
padAndPermute
(
dsbyte
byte
)
{
if
d
.
buf
==
nil
{
d
.
buf
=
d
.
storage
[
:
0
]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d
.
buf
=
append
(
d
.
buf
,
dsbyte
)
zerosStart
:=
len
(
d
.
buf
)
d
.
buf
=
d
.
storage
[
:
d
.
rate
]
for
i
:=
zerosStart
;
i
<
d
.
rate
;
i
++
{
d
.
buf
[
i
]
=
0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d
.
buf
[
d
.
rate
-
1
]
^=
0x80
// Apply the permutation
d
.
permute
()
d
.
state
=
spongeSqueezing
d
.
buf
=
d
.
storage
[
:
d
.
rate
]
copyOut
(
d
,
d
.
buf
)
}
// Write absorbs more data into the hash's state. It produces an error
// if more data is written to the ShakeHash after writing
func
(
d
*
state
)
Write
(
p
[]
byte
)
(
written
int
,
err
error
)
{
if
d
.
state
!=
spongeAbsorbing
{
panic
(
"sha3: write to sponge after read"
)
}
if
d
.
buf
==
nil
{
d
.
buf
=
d
.
storage
[
:
0
]
}
written
=
len
(
p
)
for
len
(
p
)
>
0
{
if
len
(
d
.
buf
)
==
0
&&
len
(
p
)
>=
d
.
rate
{
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn
(
d
,
p
[
:
d
.
rate
])
p
=
p
[
d
.
rate
:
]
keccakF1600
(
&
d
.
a
)
}
else
{
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo
:=
d
.
rate
-
len
(
d
.
buf
)
if
todo
>
len
(
p
)
{
todo
=
len
(
p
)
}
d
.
buf
=
append
(
d
.
buf
,
p
[
:
todo
]
...
)
p
=
p
[
todo
:
]
// If the sponge is full, apply the permutation.
if
len
(
d
.
buf
)
==
d
.
rate
{
d
.
permute
()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func
(
d
*
state
)
Read
(
out
[]
byte
)
(
n
int
,
err
error
)
{
// If we're still absorbing, pad and apply the permutation.
if
d
.
state
==
spongeAbsorbing
{
d
.
padAndPermute
(
d
.
dsbyte
)
}
n
=
len
(
out
)
// Now, do the squeezing.
for
len
(
out
)
>
0
{
n
:=
copy
(
out
,
d
.
buf
)
d
.
buf
=
d
.
buf
[
n
:
]
out
=
out
[
n
:
]
// Apply the permutation if we've squeezed the sponge dry.
if
len
(
d
.
buf
)
==
0
{
d
.
permute
()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func
(
d
*
state
)
Sum
(
in
[]
byte
)
[]
byte
{
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup
:=
d
.
clone
()
hash
:=
make
([]
byte
,
dup
.
outputLen
)
dup
.
Read
(
hash
)
return
append
(
in
,
hash
...
)
}
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/sha3_test.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package
sha3
// Tests include all the ShortMsgKATs provided by the Keccak team at
// https://github.com/gvanas/KeccakCodePackage
//
// They only include the zero-bit case of the bitwise testvectors
// published by NIST in the draft of FIPS-202.
import
(
"bytes"
"compress/flate"
"encoding/hex"
"encoding/json"
"hash"
"os"
"strings"
"testing"
)
const
(
testString
=
"brekeccakkeccak koax koax"
katFilename
=
"testdata/keccakKats.json.deflate"
)
// Internal-use instances of SHAKE used to test against KATs.
func
newHashShake128
()
hash
.
Hash
{
return
&
state
{
rate
:
168
,
dsbyte
:
0x1f
,
outputLen
:
512
}
}
func
newHashShake256
()
hash
.
Hash
{
return
&
state
{
rate
:
136
,
dsbyte
:
0x1f
,
outputLen
:
512
}
}
// testDigests contains functions returning hash.Hash instances
// with output-length equal to the KAT length for both SHA-3 and
// SHAKE instances.
var
testDigests
=
map
[
string
]
func
()
hash
.
Hash
{
"SHA3-224"
:
New224
,
"SHA3-256"
:
New256
,
"SHA3-384"
:
New384
,
"SHA3-512"
:
New512
,
"SHAKE128"
:
newHashShake128
,
"SHAKE256"
:
newHashShake256
,
}
// testShakes contains functions that return ShakeHash instances for
// testing the ShakeHash-specific interface.
var
testShakes
=
map
[
string
]
func
()
ShakeHash
{
"SHAKE128"
:
NewShake128
,
"SHAKE256"
:
NewShake256
,
}
// decodeHex converts a hex-encoded string into a raw byte string.
// It panics on malformed input (test helper only).
func decodeHex(s string) []byte {
	raw, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return raw
}
// KeccakKats mirrors the JSON test-case file: a map from function name
// to a list of known-answer tests (digest, bit length, hex message).
type KeccakKats struct {
	Kats map[string][]struct {
		Digest  string `json:"digest"`
		Length  int64  `json:"length"`
		Message string `json:"message"`
	}
}
func
testUnalignedAndGeneric
(
t
*
testing
.
T
,
testf
func
(
impl
string
))
{
xorInOrig
,
copyOutOrig
:=
xorIn
,
copyOut
xorIn
,
copyOut
=
xorInGeneric
,
copyOutGeneric
testf
(
"generic"
)
if
xorImplementationUnaligned
!=
"generic"
{
xorIn
,
copyOut
=
xorInUnaligned
,
copyOutUnaligned
testf
(
"unaligned"
)
}
xorIn
,
copyOut
=
xorInOrig
,
copyOutOrig
}
// TestKeccakKats tests the SHA-3 and Shake implementations against all the
// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
// (The testvectors are stored in keccakKats.json.deflate due to their length.)
func
TestKeccakKats
(
t
*
testing
.
T
)
{
testUnalignedAndGeneric
(
t
,
func
(
impl
string
)
{
// Read the KATs.
deflated
,
err
:=
os
.
Open
(
katFilename
)
if
err
!=
nil
{
t
.
Errorf
(
"error opening %s: %s"
,
katFilename
,
err
)
}
file
:=
flate
.
NewReader
(
deflated
)
dec
:=
json
.
NewDecoder
(
file
)
var
katSet
KeccakKats
err
=
dec
.
Decode
(
&
katSet
)
if
err
!=
nil
{
t
.
Errorf
(
"error decoding KATs: %s"
,
err
)
}
// Do the KATs.
for
functionName
,
kats
:=
range
katSet
.
Kats
{
d
:=
testDigests
[
functionName
]()
for
_
,
kat
:=
range
kats
{
d
.
Reset
()
in
,
err
:=
hex
.
DecodeString
(
kat
.
Message
)
if
err
!=
nil
{
t
.
Errorf
(
"error decoding KAT: %s"
,
err
)
}
d
.
Write
(
in
[
:
kat
.
Length
/
8
])
got
:=
strings
.
ToUpper
(
hex
.
EncodeToString
(
d
.
Sum
(
nil
)))
if
got
!=
kat
.
Digest
{
t
.
Errorf
(
"function=%s, implementation=%s, length=%d
\n
message:
\n
%s
\n
got:
\n
%s
\n
wanted:
\n
%s"
,
functionName
,
impl
,
kat
.
Length
,
kat
.
Message
,
got
,
kat
.
Digest
)
t
.
Logf
(
"wanted %+v"
,
kat
)
t
.
FailNow
()
}
continue
}
}
})
}
// TestUnalignedWrite tests that writing data in an arbitrary pattern with
// small input buffers.
func
testUnalignedWrite
(
t
*
testing
.
T
)
{
testUnalignedAndGeneric
(
t
,
func
(
impl
string
)
{
buf
:=
sequentialBytes
(
0x10000
)
for
alg
,
df
:=
range
testDigests
{
d
:=
df
()
d
.
Reset
()
d
.
Write
(
buf
)
want
:=
d
.
Sum
(
nil
)
d
.
Reset
()
for
i
:=
0
;
i
<
len
(
buf
);
{
// Cycle through offsets which make a 137 byte sequence.
// Because 137 is prime this sequence should exercise all corner cases.
offsets
:=
[
17
]
int
{
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
13
,
14
,
15
,
16
,
1
}
for
_
,
j
:=
range
offsets
{
if
v
:=
len
(
buf
)
-
i
;
v
<
j
{
j
=
v
}
d
.
Write
(
buf
[
i
:
i
+
j
])
i
+=
j
}
}
got
:=
d
.
Sum
(
nil
)
if
!
bytes
.
Equal
(
got
,
want
)
{
t
.
Errorf
(
"Unaligned writes, implementation=%s, alg=%s
\n
got %q, want %q"
,
impl
,
alg
,
got
,
want
)
}
}
})
}
// TestAppend checks that appending works when reallocation is necessary.
func
TestAppend
(
t
*
testing
.
T
)
{
testUnalignedAndGeneric
(
t
,
func
(
impl
string
)
{
d
:=
New224
()
for
capacity
:=
2
;
capacity
<=
66
;
capacity
+=
64
{
// The first time around the loop, Sum will have to reallocate.
// The second time, it will not.
buf
:=
make
([]
byte
,
2
,
capacity
)
d
.
Reset
()
d
.
Write
([]
byte
{
0xcc
})
buf
=
d
.
Sum
(
buf
)
expected
:=
"0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if
got
:=
strings
.
ToUpper
(
hex
.
EncodeToString
(
buf
));
got
!=
expected
{
t
.
Errorf
(
"got %s, want %s"
,
got
,
expected
)
}
}
})
}
// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
func
TestAppendNoRealloc
(
t
*
testing
.
T
)
{
testUnalignedAndGeneric
(
t
,
func
(
impl
string
)
{
buf
:=
make
([]
byte
,
1
,
200
)
d
:=
New224
()
d
.
Write
([]
byte
{
0xcc
})
buf
=
d
.
Sum
(
buf
)
expected
:=
"00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if
got
:=
strings
.
ToUpper
(
hex
.
EncodeToString
(
buf
));
got
!=
expected
{
t
.
Errorf
(
"%s: got %s, want %s"
,
impl
,
got
,
expected
)
}
})
}
// TestSqueezing checks that squeezing the full output a single time produces
// the same output as repeatedly squeezing the instance.
func
TestSqueezing
(
t
*
testing
.
T
)
{
testUnalignedAndGeneric
(
t
,
func
(
impl
string
)
{
for
functionName
,
newShakeHash
:=
range
testShakes
{
d0
:=
newShakeHash
()
d0
.
Write
([]
byte
(
testString
))
ref
:=
make
([]
byte
,
32
)
d0
.
Read
(
ref
)
d1
:=
newShakeHash
()
d1
.
Write
([]
byte
(
testString
))
var
multiple
[]
byte
for
_
=
range
ref
{
one
:=
make
([]
byte
,
1
)
d1
.
Read
(
one
)
multiple
=
append
(
multiple
,
one
...
)
}
if
!
bytes
.
Equal
(
ref
,
multiple
)
{
t
.
Errorf
(
"%s (%s): squeezing %d bytes one at a time failed"
,
functionName
,
impl
,
len
(
ref
))
}
}
})
}
// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
func sequentialBytes(size int) []byte {
	out := make([]byte, size)
	for i := 0; i < size; i++ {
		out[i] = byte(i)
	}
	return out
}
// BenchmarkPermutationFunction measures the speed of the permutation function
// with no input data.
func
BenchmarkPermutationFunction
(
b
*
testing
.
B
)
{
b
.
SetBytes
(
int64
(
200
))
var
lanes
[
25
]
uint64
for
i
:=
0
;
i
<
b
.
N
;
i
++
{
keccakF1600
(
&
lanes
)
}
}
// benchmarkHash tests the speed to hash num buffers of buflen each.
func
benchmarkHash
(
b
*
testing
.
B
,
h
hash
.
Hash
,
size
,
num
int
)
{
b
.
StopTimer
()
h
.
Reset
()
data
:=
sequentialBytes
(
size
)
b
.
SetBytes
(
int64
(
size
*
num
))
b
.
StartTimer
()
var
state
[]
byte
for
i
:=
0
;
i
<
b
.
N
;
i
++
{
for
j
:=
0
;
j
<
num
;
j
++
{
h
.
Write
(
data
)
}
state
=
h
.
Sum
(
state
[
:
0
])
}
b
.
StopTimer
()
h
.
Reset
()
}
// benchmarkShake is specialized to the Shake instances, which don't
// require a copy on reading output.
func
benchmarkShake
(
b
*
testing
.
B
,
h
ShakeHash
,
size
,
num
int
)
{
b
.
StopTimer
()
h
.
Reset
()
data
:=
sequentialBytes
(
size
)
d
:=
make
([]
byte
,
32
)
b
.
SetBytes
(
int64
(
size
*
num
))
b
.
StartTimer
()
for
i
:=
0
;
i
<
b
.
N
;
i
++
{
h
.
Reset
()
for
j
:=
0
;
j
<
num
;
j
++
{
h
.
Write
(
data
)
}
h
.
Read
(
d
)
}
}
func
BenchmarkSha3_512_MTU
(
b
*
testing
.
B
)
{
benchmarkHash
(
b
,
New512
(),
1350
,
1
)
}
func
BenchmarkSha3_384_MTU
(
b
*
testing
.
B
)
{
benchmarkHash
(
b
,
New384
(),
1350
,
1
)
}
func
BenchmarkSha3_256_MTU
(
b
*
testing
.
B
)
{
benchmarkHash
(
b
,
New256
(),
1350
,
1
)
}
func
BenchmarkSha3_224_MTU
(
b
*
testing
.
B
)
{
benchmarkHash
(
b
,
New224
(),
1350
,
1
)
}
func
BenchmarkShake128_MTU
(
b
*
testing
.
B
)
{
benchmarkShake
(
b
,
NewShake128
(),
1350
,
1
)
}
func
BenchmarkShake256_MTU
(
b
*
testing
.
B
)
{
benchmarkShake
(
b
,
NewShake256
(),
1350
,
1
)
}
func
BenchmarkShake256_16x
(
b
*
testing
.
B
)
{
benchmarkShake
(
b
,
NewShake256
(),
16
,
1024
)
}
func
BenchmarkShake256_1MiB
(
b
*
testing
.
B
)
{
benchmarkShake
(
b
,
NewShake256
(),
1024
,
1024
)
}
func
BenchmarkSha3_512_1MiB
(
b
*
testing
.
B
)
{
benchmarkHash
(
b
,
New512
(),
1024
,
1024
)
}
func
Example_sum
()
{
buf
:=
[]
byte
(
"some data to hash"
)
// A hash needs to be 64 bytes long to have 256-bit collision resistance.
h
:=
make
([]
byte
,
64
)
// Compute a 64-byte hash of buf and put it in h.
ShakeSum256
(
h
,
buf
)
}
func
Example_mac
()
{
k
:=
[]
byte
(
"this is a secret key; you should generate a strong random key that's at least 32 bytes long"
)
buf
:=
[]
byte
(
"and this is some data to authenticate"
)
// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
h
:=
make
([]
byte
,
32
)
d
:=
NewShake256
()
// Write the key into the hash.
d
.
Write
(
k
)
// Now write the data.
d
.
Write
(
buf
)
// Read 32 bytes of output from the hash into h.
d
.
Read
(
h
)
}
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/shake.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package
sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.

import (
	"io"
)

// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
	// Write absorbs more data into the hash's state. It panics if input is
	// written to it after output has been read from it.
	io.Writer

	// Read reads more output from the hash; reading affects the hash's
	// state. (ShakeHash.Read is thus very different from Hash.Sum)
	// It never returns an error.
	io.Reader

	// Clone returns a copy of the ShakeHash in its current state.
	Clone() ShakeHash

	// Reset resets the ShakeHash to its initial state.
	Reset()
}
func
(
d
*
state
)
Clone
()
ShakeHash
{
return
d
.
clone
()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func
NewShake128
()
ShakeHash
{
return
&
state
{
rate
:
168
,
dsbyte
:
0x1f
}
}
// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func
NewShake256
()
ShakeHash
{
return
&
state
{
rate
:
136
,
dsbyte
:
0x1f
}
}
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func
ShakeSum128
(
hash
,
data
[]
byte
)
{
h
:=
NewShake128
()
h
.
Write
(
data
)
h
.
Read
(
hash
)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func
ShakeSum256
(
hash
,
data
[]
byte
)
{
h
:=
NewShake256
()
h
.
Write
(
data
)
h
.
Read
(
hash
)
}
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/testdata/keccakKats.json.deflate
deleted
100644 → 0
View file @
4a64aae7
File deleted
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/xor.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!386 appengine
package
sha3
var
(
xorIn
=
xorInGeneric
copyOut
=
copyOutGeneric
xorInUnaligned
=
xorInGeneric
copyOutUnaligned
=
copyOutGeneric
)
const
xorImplementationUnaligned
=
"generic"
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/xor_generic.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package
sha3
import
"encoding/binary"
// xorInGeneric xors the bytes in buf into the state; it
// makes no non-portable assumptions about memory layout
// or alignment.
func
xorInGeneric
(
d
*
state
,
buf
[]
byte
)
{
n
:=
len
(
buf
)
/
8
for
i
:=
0
;
i
<
n
;
i
++
{
a
:=
binary
.
LittleEndian
.
Uint64
(
buf
)
d
.
a
[
i
]
^=
a
buf
=
buf
[
8
:
]
}
}
// copyOutGeneric copies ulint64s to a byte buffer.
func
copyOutGeneric
(
d
*
state
,
b
[]
byte
)
{
for
i
:=
0
;
len
(
b
)
>=
8
;
i
++
{
binary
.
LittleEndian
.
PutUint64
(
b
,
d
.
a
[
i
])
b
=
b
[
8
:
]
}
}
vendor/QmY1q6BMPywiUXEKAjehsgmPaBeLHTzs3FNaptUsbmpngb/crypto-sha3/xor_unaligned.go
deleted
100644 → 0
View file @
4a64aae7
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 386
// +build !appengine
package
sha3
import
"unsafe"
func
xorInUnaligned
(
d
*
state
,
buf
[]
byte
)
{
bw
:=
(
*
[
maxRate
/
8
]
uint64
)(
unsafe
.
Pointer
(
&
buf
[
0
]))
n
:=
len
(
buf
)
if
n
>=
72
{
d
.
a
[
0
]
^=
bw
[
0
]
d
.
a
[
1
]
^=
bw
[
1
]
d
.
a
[
2
]
^=
bw
[
2
]
d
.
a
[
3
]
^=
bw
[
3
]
d
.
a
[
4
]
^=
bw
[
4
]
d
.
a
[
5
]
^=
bw
[
5
]
d
.
a
[
6
]
^=
bw
[
6
]
d
.
a
[
7
]
^=
bw
[
7
]
d
.
a
[
8
]
^=
bw
[
8
]
}
if
n
>=
104
{
d
.
a
[
9
]
^=
bw
[
9
]
d
.
a
[
10
]
^=
bw
[
10
]
d
.
a
[
11
]
^=
bw
[
11
]
d
.
a
[
12
]
^=
bw
[
12
]
}
if
n
>=
136
{
d
.
a
[
13
]
^=
bw
[
13
]
d
.
a
[
14
]
^=
bw
[
14
]
d
.
a
[
15
]
^=
bw
[
15
]
d
.
a
[
16
]
^=
bw
[
16
]
}
if
n
>=
144
{
d
.
a
[
17
]
^=
bw
[
17
]
}
if
n
>=
168
{
d
.
a
[
18
]
^=
bw
[
18
]
d
.
a
[
19
]
^=
bw
[
19
]
d
.
a
[
20
]
^=
bw
[
20
]
}
}
func
copyOutUnaligned
(
d
*
state
,
buf
[]
byte
)
{
ab
:=
(
*
[
maxRate
]
uint8
)(
unsafe
.
Pointer
(
&
d
.
a
[
0
]))
copy
(
buf
,
ab
[
:
])
}
var
(
xorIn
=
xorInUnaligned
copyOut
=
copyOutUnaligned
)
const
xorImplementationUnaligned
=
"unaligned"
vendor/QmYWqTn1i8yv9QRDzGPJ2yRudKzYCaC5Aqasbm8vwaG92E/multiaddr-filter/.gxlastpubver
deleted
100644 → 0
View file @
4a64aae7
1.0.0: QmVdADza4QFVAR9xqAxRQjt9vTZJ6UrVLgBstKua1Xg7he
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/.travis.yml
deleted
100644 → 0
View file @
4a64aae7
language
:
go
go
:
-
1.3
-
release
-
tip
script
:
-
make test
env
:
TEST_VERBOSE=1
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/LICENSE
deleted
100644 → 0
View file @
4a64aae7
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/Makefile
deleted
100644 → 0
View file @
4a64aae7
test
:
go_test other_tests
other_tests
:
cd test
&&
make
test
go_test
:
go_deps
go
test
-race
-cpu
=
5
-v
./...
go_deps
:
go get golang.org/x/crypto/sha3
go get github.com/jbenet/go-base58
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/README.md
deleted
100644 → 0
View file @
4a64aae7
# go-multihash
![
travis
](
https://travis-ci.org/jbenet/go-multihash.svg
)
[
multihash
](
//github.com/jbenet/multihash
)
implementation in Go.
## Example
```
go
package
main
import
(
"encoding/hex"
"fmt"
"github.com/jbenet/go-multihash"
)
func
main
()
{
// ignores errors for simplicity.
// don't do that at home.
buf
,
_
:=
hex
.
DecodeString
(
"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"
)
mhbuf
,
_
:=
multihash
.
EncodeName
(
buf
,
"sha1"
);
mhhex
:=
hex
.
EncodeToString
(
mhbuf
)
fmt
.
Printf
(
"hex: %v
\n
"
,
mhhex
);
o
,
_
:=
multihash
.
Decode
(
mhbuf
);
mhhex
=
hex
.
EncodeToString
(
o
.
Digest
);
fmt
.
Printf
(
"obj: %v 0x%x %d %s
\n
"
,
o
.
Name
,
o
.
Code
,
o
.
Length
,
mhhex
);
}
```
Run
[
test/foo.go
](
test/foo.go
)
```
> cd test/
> go build
> ./test
hex: 11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33
obj: sha1 0x11 20 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33
```
## License
MIT
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/io.go
deleted
100644 → 0
View file @
4a64aae7
package
multihash
import
(
"fmt"
"io"
)
// Reader is an io.Reader wrapper that exposes a function
// to read a whole multihash, parse it, and return it.
type
Reader
interface
{
io
.
Reader
ReadMultihash
()
(
Multihash
,
error
)
}
// Writer is an io.Writer wrapper that exposes a function
// to write a whole multihash.
type
Writer
interface
{
io
.
Writer
WriteMultihash
(
Multihash
)
error
}
// NewReader wraps an io.Reader with a multihash.Reader
func
NewReader
(
r
io
.
Reader
)
Reader
{
return
&
mhReader
{
r
}
}
// NewWriter wraps an io.Writer with a multihash.Writer
func
NewWriter
(
w
io
.
Writer
)
Writer
{
return
&
mhWriter
{
w
}
}
// mhReader implements Reader by delegating plain reads to the wrapped
// io.Reader.
type mhReader struct {
	r io.Reader
}

// Read forwards to the wrapped reader.
func (r *mhReader) Read(buf []byte) (n int, err error) {
	return r.r.Read(buf)
}
func
(
r
*
mhReader
)
ReadMultihash
()
(
Multihash
,
error
)
{
mhhdr
:=
make
([]
byte
,
2
)
if
_
,
err
:=
io
.
ReadFull
(
r
.
r
,
mhhdr
);
err
!=
nil
{
return
nil
,
err
}
// first byte is the algo, the second is the length.
// (varints someday...)
length
:=
uint
(
mhhdr
[
1
])
if
length
>
127
{
return
nil
,
fmt
.
Errorf
(
"varints not yet supported (length is %d)"
,
length
)
}
buf
:=
make
([]
byte
,
length
+
2
)
buf
[
0
]
=
mhhdr
[
0
]
buf
[
1
]
=
mhhdr
[
1
]
if
_
,
err
:=
io
.
ReadFull
(
r
.
r
,
buf
[
2
:
]);
err
!=
nil
{
return
nil
,
err
}
return
Cast
(
buf
)
}
type
mhWriter
struct
{
w
io
.
Writer
}
func
(
w
*
mhWriter
)
Write
(
buf
[]
byte
)
(
n
int
,
err
error
)
{
return
w
.
w
.
Write
(
buf
)
}
func
(
w
*
mhWriter
)
WriteMultihash
(
m
Multihash
)
error
{
_
,
err
:=
w
.
w
.
Write
([]
byte
(
m
))
return
err
}
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/io_test.go
deleted
100644 → 0
View file @
4a64aae7
package
multihash
import
(
"bytes"
"io"
"testing"
)
func
TestReader
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
for
_
,
tc
:=
range
testCases
{
m
,
err
:=
tc
.
Multihash
()
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
buf
.
Write
([]
byte
(
m
))
}
r
:=
NewReader
(
&
buf
)
for
_
,
tc
:=
range
testCases
{
h
,
err
:=
tc
.
Multihash
()
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
h2
,
err
:=
r
.
ReadMultihash
()
if
err
!=
nil
{
t
.
Error
(
err
)
continue
}
if
!
bytes
.
Equal
(
h
,
h2
)
{
t
.
Error
(
"h and h2 should be equal"
)
}
}
}
func
TestWriter
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
w
:=
NewWriter
(
&
buf
)
for
_
,
tc
:=
range
testCases
{
m
,
err
:=
tc
.
Multihash
()
if
err
!=
nil
{
t
.
Error
(
err
)
continue
}
if
err
:=
w
.
WriteMultihash
(
m
);
err
!=
nil
{
t
.
Error
(
err
)
continue
}
buf2
:=
make
([]
byte
,
len
(
m
))
if
_
,
err
:=
io
.
ReadFull
(
&
buf
,
buf2
);
err
!=
nil
{
t
.
Error
(
err
)
continue
}
if
!
bytes
.
Equal
(
m
,
buf2
)
{
t
.
Error
(
"m and buf2 should be equal"
)
}
}
}
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/multihash.go
deleted
100644 → 0
View file @
4a64aae7
package
multihash
import
(
"encoding/hex"
"errors"
"fmt"
b58
"QmNsoHoCVhgXcv1Yg45jtkMgimxorTAN36fV9AQMFXHHAQ/go-base58"
)
// errors
var
(
ErrUnknownCode
=
errors
.
New
(
"unknown multihash code"
)
ErrTooShort
=
errors
.
New
(
"multihash too short. must be > 3 bytes"
)
ErrTooLong
=
errors
.
New
(
"multihash too long. must be < 129 bytes"
)
ErrLenNotSupported
=
errors
.
New
(
"multihash does not yet support digests longer than 127 bytes"
)
)
// ErrInconsistentLen is returned when a decoded multihash has an inconsistent length
type
ErrInconsistentLen
struct
{
dm
*
DecodedMultihash
}
func
(
e
ErrInconsistentLen
)
Error
()
string
{
return
fmt
.
Sprintf
(
"multihash length inconsistent: %v"
,
e
.
dm
)
}
// Multihash function codes.
const (
	SHA1     = 0x11
	SHA2_256 = 0x12
	SHA2_512 = 0x13
	SHA3     = 0x14
	BLAKE2B  = 0x40
	BLAKE2S  = 0x41
)

// Names maps the name of a hash to the code
var Names = map[string]int{
	"sha1":     SHA1,
	"sha2-256": SHA2_256,
	"sha2-512": SHA2_512,
	"sha3":     SHA3,
	"blake2b":  BLAKE2B,
	"blake2s":  BLAKE2S,
}

// Codes maps a hash code to its name
var Codes = map[int]string{
	SHA1:     "sha1",
	SHA2_256: "sha2-256",
	SHA2_512: "sha2-512",
	SHA3:     "sha3",
	BLAKE2B:  "blake2b",
	BLAKE2S:  "blake2s",
}

// DefaultLengths maps a hash code to its default digest length in bytes.
var DefaultLengths = map[int]int{
	SHA1:     20,
	SHA2_256: 32,
	SHA2_512: 64,
	SHA3:     64,
	BLAKE2B:  64,
	BLAKE2S:  32,
}
// DecodedMultihash is the parsed form of a multihash: function code,
// human-readable name, digest length in bytes, and the digest itself.
type DecodedMultihash struct {
	Code   int
	Name   string
	Length int
	Digest []byte
}

// Multihash is a raw multihash byte string:
// <1-byte code><1-byte length><digest>.
type Multihash []byte

// HexString returns the hex-encoded form of the multihash.
func (m *Multihash) HexString() string {
	return hex.EncodeToString([]byte(*m))
}

// String implements fmt.Stringer using the hex encoding.
func (m *Multihash) String() string {
	return m.HexString()
}
func
FromHexString
(
s
string
)
(
Multihash
,
error
)
{
b
,
err
:=
hex
.
DecodeString
(
s
)
if
err
!=
nil
{
return
Multihash
{},
err
}
return
Cast
(
b
)
}
func
(
m
Multihash
)
B58String
()
string
{
return
b58
.
Encode
([]
byte
(
m
))
}
func
FromB58String
(
s
string
)
(
m
Multihash
,
err
error
)
{
// panic handler, in case we try accessing bytes incorrectly.
defer
func
()
{
if
e
:=
recover
();
e
!=
nil
{
m
=
Multihash
{}
err
=
e
.
(
error
)
}
}()
//b58 smells like it can panic...
b
:=
b58
.
Decode
(
s
)
return
Cast
(
b
)
}
func
Cast
(
buf
[]
byte
)
(
Multihash
,
error
)
{
dm
,
err
:=
Decode
(
buf
)
if
err
!=
nil
{
return
Multihash
{},
err
}
if
!
ValidCode
(
dm
.
Code
)
{
return
Multihash
{},
ErrUnknownCode
}
return
Multihash
(
buf
),
nil
}
// Decode a hash from the given Multihash.
func
Decode
(
buf
[]
byte
)
(
*
DecodedMultihash
,
error
)
{
if
len
(
buf
)
<
3
{
return
nil
,
ErrTooShort
}
if
len
(
buf
)
>
129
{
return
nil
,
ErrTooLong
}
dm
:=
&
DecodedMultihash
{
Code
:
int
(
uint8
(
buf
[
0
])),
Name
:
Codes
[
int
(
uint8
(
buf
[
0
]))],
Length
:
int
(
uint8
(
buf
[
1
])),
Digest
:
buf
[
2
:
],
}
if
len
(
dm
.
Digest
)
!=
dm
.
Length
{
return
nil
,
ErrInconsistentLen
{
dm
}
}
return
dm
,
nil
}
// Encode a hash digest along with the specified function code.
// Note: the length is derived from the length of the digest itself.
func
Encode
(
buf
[]
byte
,
code
int
)
([]
byte
,
error
)
{
if
!
ValidCode
(
code
)
{
return
nil
,
ErrUnknownCode
}
if
len
(
buf
)
>
127
{
return
nil
,
ErrLenNotSupported
}
pre
:=
make
([]
byte
,
2
)
pre
[
0
]
=
byte
(
uint8
(
code
))
pre
[
1
]
=
byte
(
uint8
(
len
(
buf
)))
return
append
(
pre
,
buf
...
),
nil
}
func
EncodeName
(
buf
[]
byte
,
name
string
)
([]
byte
,
error
)
{
return
Encode
(
buf
,
Names
[
name
])
}
// ValidCode checks whether a multihash code is valid.
func
ValidCode
(
code
int
)
bool
{
if
AppCode
(
code
)
{
return
true
}
if
_
,
ok
:=
Codes
[
code
];
ok
{
return
true
}
return
false
}
// AppCode checks whether a multihash code is part of the App range
// (0x00–0x0f inclusive).
func AppCode(code int) bool {
	return 0 <= code && code < 0x10
}
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/multihash/.gitignore
deleted
100644 → 0
View file @
4a64aae7
multihash
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/multihash/.gobuilder.yml
deleted
100644 → 0
View file @
4a64aae7
---
artifacts
:
-
LICENSE
-
README.md
-
install.dist.sh
vendor/QmdeauTdyf38KDQB4Cc4CurPWRRb5pej27NCXPA7kbPTJy/go-multihash/multihash/LICENSE
deleted
100644 → 0
View file @
4a64aae7
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment