Commit 3c640f9a authored by ginuerzh

update vendor

parent 73bc679b
@@ -3,19 +3,55 @@
 [[projects]]
   branch = "master"
+  digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870"
   name = "golang.org/x/crypto"
-  packages = ["ed25519","ed25519/internal/edwards25519"]
-  revision = "b47b1587369238182299fe4dad77d05b8b461e06"
+  packages = [
+    "ed25519",
+    "ed25519/internal/edwards25519",
+  ]
+  pruneopts = ""
+  revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"

 [[projects]]
   branch = "master"
+  digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685"
   name = "golang.org/x/net"
-  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
-  revision = "1e491301e022f8f977054da4c2d852decd59571f"
+  packages = [
+    "bpf",
+    "context",
+    "internal/iana",
+    "internal/socket",
+    "ipv4",
+    "ipv6",
+  ]
+  pruneopts = ""
+  revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
+
+[[projects]]
+  branch = "master"
+  digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
+  name = "golang.org/x/sync"
+  packages = ["errgroup"]
+  pruneopts = ""
+  revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
+
+[[projects]]
+  branch = "master"
+  digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec"
+  name = "golang.org/x/sys"
+  packages = ["unix"]
+  pruneopts = ""
+  revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "c4abc38abaeeeeb9be92455c9c02cae32841122b8982aaa067ef25bb8e86ff9d"
+  input-imports = [
+    "golang.org/x/crypto/ed25519",
+    "golang.org/x/net/ipv4",
+    "golang.org/x/net/ipv6",
+    "golang.org/x/sync/errgroup",
+    "golang.org/x/sys/unix",
+  ]
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -24,3 +24,15 @@
 [[constraint]]
   branch = "master"
   name = "golang.org/x/crypto"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/net"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/sys"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/sync"
@@ -67,6 +67,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
 * https://github.com/xor-gate/sshfp
 * https://github.com/rs/dnstrace
 * https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
+* https://github.com/semihalev/sdns

 Send pull request if you want to be listed here.
...
@@ -7,11 +7,8 @@ import (
     "context"
     "crypto/tls"
     "encoding/binary"
-    "fmt"
     "io"
-    "io/ioutil"
     "net"
-    "net/http"
     "strings"
     "time"
 )
@@ -19,8 +16,6 @@ import (
 const (
     dnsTimeout     time.Duration = 2 * time.Second
     tcpIdleTimeout time.Duration = 8 * time.Second
-
-    dohMimeType = "application/dns-message"
 )

 // A Conn represents a connection to a DNS server.
@@ -44,7 +39,6 @@ type Client struct {
     DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
     ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
     WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
-    HTTPClient     *http.Client      // The http.Client to use for DNS-over-HTTPS
     TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
     SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
     group          singleflight
@@ -132,11 +126,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
 // attribute appropriately
 func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
     if !c.SingleInflight {
-        if c.Net == "https" {
-            // TODO(tmthrgd): pipe timeouts into exchangeDOH
-            return c.exchangeDOH(context.TODO(), m, address)
-        }
-
         return c.exchange(m, address)
     }
@@ -149,11 +138,6 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
         cl = cl1
     }
     r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
-        if c.Net == "https" {
-            // TODO(tmthrgd): pipe timeouts into exchangeDOH
-            return c.exchangeDOH(context.TODO(), m, address)
-        }
-
         return c.exchange(m, address)
     })
     if r != nil && shared {
@@ -199,67 +183,6 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
     return r, rtt, err
 }

-func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
-    p, err := m.Pack()
-    if err != nil {
-        return nil, 0, err
-    }
-
-    req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p))
-    if err != nil {
-        return nil, 0, err
-    }
-
-    req.Header.Set("Content-Type", dohMimeType)
-    req.Header.Set("Accept", dohMimeType)
-
-    hc := http.DefaultClient
-    if c.HTTPClient != nil {
-        hc = c.HTTPClient
-    }
-
-    if ctx != context.Background() && ctx != context.TODO() {
-        req = req.WithContext(ctx)
-    }
-
-    t := time.Now()
-
-    resp, err := hc.Do(req)
-    if err != nil {
-        return nil, 0, err
-    }
-    defer closeHTTPBody(resp.Body)
-
-    if resp.StatusCode != http.StatusOK {
-        return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
-    }
-
-    if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
-        return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
-    }
-
-    p, err = ioutil.ReadAll(resp.Body)
-    if err != nil {
-        return nil, 0, err
-    }
-
-    rtt = time.Since(t)
-
-    r = new(Msg)
-    if err := r.Unpack(p); err != nil {
-        return r, 0, err
-    }
-
-    // TODO: TSIG? Is it even supported over DoH?
-
-    return r, rtt, nil
-}
-
-func closeHTTPBody(r io.ReadCloser) error {
-    io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
-    return r.Close()
-}
-
 // ReadMsg reads a message from the connection co.
 // If the received message contains a TSIG record the transaction signature
 // is verified. This method always tries to return the message, however if an
@@ -559,19 +482,15 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
 // context, if present. If there is both a context deadline and a configured
 // timeout on the client, the earliest of the two takes effect.
 func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
-    if !c.SingleInflight && c.Net == "https" {
-        return c.exchangeDOH(ctx, m, a)
-    }
-
     var timeout time.Duration
     if deadline, ok := ctx.Deadline(); !ok {
         timeout = 0
     } else {
-        timeout = deadline.Sub(time.Now())
+        timeout = time.Until(deadline)
     }
     // not passing the context to the underlying calls, as the API does not support
     // context. For timeouts you should set up Client.Dialer and call Client.Exchange.
-    // TODO(tmthrgd): this is a race condition
+    // TODO(tmthrgd,miekg): this is a race condition.
     c.Dialer = &net.Dialer{Timeout: timeout}
     return c.Exchange(m, a)
 }
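Editor's note: a minimal, hedged sketch of how the client API above is typically driven once the DoH path is removed; the query name and resolver address are placeholders, not part of the commit.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/miekg/dns"
)

func main() {
    c := &dns.Client{Timeout: 2 * time.Second}

    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeA) // placeholder query name

    // ExchangeContext now always speaks plain DNS; the deadline is folded
    // into c.Dialer as the diff above shows.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    r, rtt, err := c.ExchangeContext(ctx, m, "127.0.0.1:53") // placeholder resolver address
    if err != nil {
        fmt.Println("query failed:", err)
        return
    }
    fmt.Println("rtt:", rtt, "answers:", len(r.Answer))
}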
 package dns

 import (
+    "bufio"
     "crypto"
     "crypto/dsa"
     "crypto/ecdsa"
@@ -194,23 +195,12 @@ func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
 // parseKey reads a private key from r. It returns a map[string]string,
 // with the key-value pairs, or an error when the file is not correct.
 func parseKey(r io.Reader, file string) (map[string]string, error) {
-    s, cancel := scanInit(r)
     m := make(map[string]string)
-    c := make(chan lex)
-    k := ""
-    defer func() {
-        cancel()
-        // zlexer can send up to two tokens, the next one and possibly 1 remainders.
-        // Do a non-blocking read.
-        _, ok := <-c
-        _, ok = <-c
-        if !ok {
-            // too bad
-        }
-    }()
-    // Start the lexer
-    go klexer(s, c)
-    for l := range c {
+    var k string
+
+    c := newKLexer(r)
+
+    for l, ok := c.Next(); ok; l, ok = c.Next() {
         // It should alternate
         switch l.value {
         case zKey:
@@ -219,41 +209,111 @@ func parseKey(r io.Reader, file string) (map[string]string, error) {
             if k == "" {
                 return nil, &ParseError{file, "no private key seen", l}
             }
-            //println("Setting", strings.ToLower(k), "to", l.token, "b")
+
             m[strings.ToLower(k)] = l.token
             k = ""
         }
     }

+    // Surface any read errors from r.
+    if err := c.Err(); err != nil {
+        return nil, &ParseError{file: file, err: err.Error()}
+    }
+
     return m, nil
 }

-// klexer scans the sourcefile and returns tokens on the channel c.
-func klexer(s *scan, c chan lex) {
-    var l lex
-    str := "" // Hold the current read text
-    commt := false
-    key := true
-    x, err := s.tokenText()
-    defer close(c)
-    for err == nil {
-        l.column = s.position.Column
-        l.line = s.position.Line
+type klexer struct {
+    br io.ByteReader
+
+    readErr error
+
+    line   int
+    column int
+
+    key bool
+
+    eol bool // end-of-line
+}
+
+func newKLexer(r io.Reader) *klexer {
+    br, ok := r.(io.ByteReader)
+    if !ok {
+        br = bufio.NewReaderSize(r, 1024)
+    }
+
+    return &klexer{
+        br: br,
+
+        line: 1,
+
+        key: true,
+    }
+}
+
+func (kl *klexer) Err() error {
+    if kl.readErr == io.EOF {
+        return nil
+    }
+
+    return kl.readErr
+}
+
+// readByte returns the next byte from the input
+func (kl *klexer) readByte() (byte, bool) {
+    if kl.readErr != nil {
+        return 0, false
+    }
+
+    c, err := kl.br.ReadByte()
+    if err != nil {
+        kl.readErr = err
+        return 0, false
+    }
+
+    // delay the newline handling until the next token is delivered,
+    // fixes off-by-one errors when reporting a parse error.
+    if kl.eol {
+        kl.line++
+        kl.column = 0
+        kl.eol = false
+    }
+
+    if c == '\n' {
+        kl.eol = true
+    } else {
+        kl.column++
+    }
+
+    return c, true
+}
+
+func (kl *klexer) Next() (lex, bool) {
+    var (
+        l lex
+
+        str strings.Builder
+
+        commt bool
+    )
+
+    for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
+        l.line, l.column = kl.line, kl.column
+
         switch x {
         case ':':
-            if commt {
+            if commt || !kl.key {
                 break
             }
-            l.token = str
-            if key {
-                l.value = zKey
-                c <- l
-                // Next token is a space, eat it
-                s.tokenText()
-                key = false
-                str = ""
-            } else {
-                l.value = zValue
-            }
+
+            kl.key = false
+
+            // Next token is a space, eat it
+            kl.readByte()
+
+            l.value = zKey
+            l.token = str.String()
+            return l, true
         case ';':
             commt = true
         case '\n':
@@ -261,24 +321,32 @@ func klexer(s *scan, c chan lex) {
                 // Reset a comment
                 commt = false
             }
+
+            kl.key = true
+
             l.value = zValue
-            l.token = str
-            c <- l
-            str = ""
-            commt = false
-            key = true
+            l.token = str.String()
+            return l, true
         default:
             if commt {
                 break
             }
-            str += string(x)
+
+            str.WriteByte(x)
         }
-        x, err = s.tokenText()
     }
-    if len(str) > 0 {
+
+    if kl.readErr != nil && kl.readErr != io.EOF {
+        // Don't return any tokens after a read error occurs.
+        return lex{value: zEOF}, false
+    }
+
+    if str.Len() > 0 {
         // Send remainder
-        l.token = str
         l.value = zValue
-        c <- l
+        l.token = str.String()
+        return l, true
     }
+
+    return lex{value: zEOF}, false
 }
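Editor's note: the lexer above is internal; it is reached through the public key-reading entry points. A hedged sketch, with hypothetical BIND-style file names, of how a caller ends up in parseKey and the newKLexer/Next loop.

package main

import (
    "crypto"
    "fmt"
    "os"

    "github.com/miekg/dns"
)

func main() {
    // Hypothetical file names for a DNSKEY and its matching private key.
    pub, err := os.Open("Kexample.org.+013+12345.key")
    if err != nil {
        panic(err)
    }
    defer pub.Close()

    rr, err := dns.ReadRR(pub, "Kexample.org.+013+12345.key")
    if err != nil {
        panic(err)
    }
    key := rr.(*dns.DNSKEY)

    priv, err := os.Open("Kexample.org.+013+12345.private")
    if err != nil {
        panic(err)
    }
    defer priv.Close()

    // ReadPrivateKey is the public entry point that drives parseKey above.
    var pk crypto.PrivateKey
    pk, err = key.ReadPrivateKey(priv, "Kexample.org.+013+12345.private")
    if err != nil {
        panic(err)
    }
    fmt.Printf("loaded %T for %s\n", pk, key.Hdr.Name)
}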
@@ -2,8 +2,8 @@ package dns

 import (
     "bytes"
-    "errors"
     "fmt"
+    "io"
     "strconv"
     "strings"
 )
@@ -18,152 +18,225 @@ import (
 // * rhs (rdata)
 // But we are lazy here, only the range is parsed *all* occurrences
 // of $ after that are interpreted.
-// Any error are returned as a string value, the empty string signals
-// "no error".
-func generate(l lex, c chan lex, t chan *Token, o string) string {
+func (zp *ZoneParser) generate(l lex) (RR, bool) {
+    token := l.token
     step := 1
-    if i := strings.IndexAny(l.token, "/"); i != -1 {
-        if i+1 == len(l.token) {
-            return "bad step in $GENERATE range"
+    if i := strings.IndexByte(token, '/'); i >= 0 {
+        if i+1 == len(token) {
+            return zp.setParseError("bad step in $GENERATE range", l)
         }
-        if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
-            if s < 0 {
-                return "bad step in $GENERATE range"
-            }
-            step = s
-        } else {
-            return "bad step in $GENERATE range"
+
+        s, err := strconv.Atoi(token[i+1:])
+        if err != nil || s <= 0 {
+            return zp.setParseError("bad step in $GENERATE range", l)
         }
-        l.token = l.token[:i]
+
+        step = s
+        token = token[:i]
     }
-    sx := strings.SplitN(l.token, "-", 2)
+
+    sx := strings.SplitN(token, "-", 2)
     if len(sx) != 2 {
-        return "bad start-stop in $GENERATE range"
+        return zp.setParseError("bad start-stop in $GENERATE range", l)
     }
+
     start, err := strconv.Atoi(sx[0])
     if err != nil {
-        return "bad start in $GENERATE range"
+        return zp.setParseError("bad start in $GENERATE range", l)
     }
+
     end, err := strconv.Atoi(sx[1])
     if err != nil {
-        return "bad stop in $GENERATE range"
+        return zp.setParseError("bad stop in $GENERATE range", l)
     }
     if end < 0 || start < 0 || end < start {
-        return "bad range in $GENERATE range"
+        return zp.setParseError("bad range in $GENERATE range", l)
     }

-    <-c // _BLANK
+    zp.c.Next() // _BLANK
+
     // Create a complete new string, which we then parse again.
-    s := ""
-BuildRR:
-    l = <-c
-    if l.value != zNewline && l.value != zEOF {
+    var s string
+    for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
+        if l.err {
+            return zp.setParseError("bad data in $GENERATE directive", l)
+        }
+        if l.value == zNewline {
+            break
+        }
+
         s += l.token
-        goto BuildRR
     }
-    for i := start; i <= end; i += step {
-        var (
-            escape bool
-            dom    bytes.Buffer
-            mod    string
-            err    error
-            offset int
-        )

-        for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
-            switch s[j] {
-            case '\\':
-                if escape {
-                    dom.WriteByte('\\')
-                    escape = false
-                    continue
-                }
-                escape = true
-            case '$':
-                mod = "%d"
-                offset = 0
-                if escape {
-                    dom.WriteByte('$')
-                    escape = false
-                    continue
-                }
-                escape = false
-                if j+1 >= len(s) { // End of the string
-                    dom.WriteString(fmt.Sprintf(mod, i+offset))
-                    continue
-                } else {
-                    if s[j+1] == '$' {
-                        dom.WriteByte('$')
-                        j++
-                        continue
-                    }
-                }
-                // Search for { and }
-                if s[j+1] == '{' { // Modifier block
-                    sep := strings.Index(s[j+2:], "}")
-                    if sep == -1 {
-                        return "bad modifier in $GENERATE"
-                    }
-                    mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
-                    if err != nil {
-                        return err.Error()
-                    }
-                    j += 2 + sep // Jump to it
-                }
-                dom.WriteString(fmt.Sprintf(mod, i+offset))
-            default:
-                if escape { // Pretty useless here
-                    escape = false
-                    continue
-                }
-                dom.WriteByte(s[j])
-            }
-        }
-        // Re-parse the RR and send it on the current channel t
-        rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
-        if err != nil {
-            return err.Error()
-        }
-        t <- &Token{RR: rx}
-        // Its more efficient to first built the rrlist and then parse it in
-        // one go! But is this a problem?
+    r := &generateReader{
+        s: s,
+
+        cur:   start,
+        start: start,
+        end:   end,
+        step:  step,
+
+        file: zp.file,
+        lex:  &l,
     }
-    return ""
+    zp.sub = NewZoneParser(r, zp.origin, zp.file)
+    zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
+    zp.sub.SetDefaultTTL(defaultTtl)
+    return zp.subNext()
+}
+
+type generateReader struct {
+    s  string
+    si int
+
+    cur   int
+    start int
+    end   int
+    step  int
+
+    mod bytes.Buffer
+
+    escape bool
+
+    eof bool
+
+    file string
+    lex  *lex
+}
+
+func (r *generateReader) parseError(msg string, end int) *ParseError {
+    r.eof = true // Make errors sticky.
+
+    l := *r.lex
+    l.token = r.s[r.si-1 : end]
+    l.column += r.si // l.column starts one zBLANK before r.s
+
+    return &ParseError{r.file, msg, l}
+}
+
+func (r *generateReader) Read(p []byte) (int, error) {
+    // NewZLexer, through NewZoneParser, should use ReadByte and
+    // not end up here.
+
+    panic("not implemented")
+}
+
+func (r *generateReader) ReadByte() (byte, error) {
+    if r.eof {
+        return 0, io.EOF
+    }
+    if r.mod.Len() > 0 {
+        return r.mod.ReadByte()
+    }
+
+    if r.si >= len(r.s) {
+        r.si = 0
+
+        r.cur += r.step
+
+        r.eof = r.cur > r.end || r.cur < 0
+        return '\n', nil
+    }
+
+    si := r.si
+    r.si++
+
+    switch r.s[si] {
+    case '\\':
+        if r.escape {
+            r.escape = false
+            return '\\', nil
+        }
+
+        r.escape = true
+        return r.ReadByte()
+    case '$':
+        if r.escape {
+            r.escape = false
+            return '$', nil
+        }
+
+        mod := "%d"
+
+        if si >= len(r.s)-1 {
+            // End of the string
+            fmt.Fprintf(&r.mod, mod, r.cur)
+            return r.mod.ReadByte()
+        }
+
+        if r.s[si+1] == '$' {
+            r.si++
+            return '$', nil
+        }
+
+        var offset int
+
+        // Search for { and }
+        if r.s[si+1] == '{' {
+            // Modifier block
+            sep := strings.Index(r.s[si+2:], "}")
+            if sep < 0 {
+                return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
+            }
+
+            var errMsg string
+            mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
+            if errMsg != "" {
+                return 0, r.parseError(errMsg, si+3+sep)
+            }
+            if r.start+offset < 0 || r.end+offset > 1<<31-1 {
+                return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
+            }
+
+            r.si += 2 + sep // Jump to it
+        }
+
+        fmt.Fprintf(&r.mod, mod, r.cur+offset)
+        return r.mod.ReadByte()
+    default:
+        if r.escape { // Pretty useless here
+            r.escape = false
+            return r.ReadByte()
+        }
+
+        return r.s[si], nil
+    }
 }

 // Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
-func modToPrintf(s string) (string, int, error) {
-    xs := strings.Split(s, ",")
-
+func modToPrintf(s string) (string, int, string) {
     // Modifier is { offset [ ,width [ ,base ] ] } - provide default
     // values for optional width and type, if necessary.
-    switch len(xs) {
+    var offStr, widthStr, base string
+    switch xs := strings.Split(s, ","); len(xs) {
     case 1:
-        xs = append(xs, "0", "d")
+        offStr, widthStr, base = xs[0], "0", "d"
     case 2:
-        xs = append(xs, "d")
+        offStr, widthStr, base = xs[0], xs[1], "d"
     case 3:
+        offStr, widthStr, base = xs[0], xs[1], xs[2]
     default:
-        return "", 0, errors.New("bad modifier in $GENERATE")
+        return "", 0, "bad modifier in $GENERATE"
     }

-    // xs[0] is offset, xs[1] is width, xs[2] is base
-    if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
-        return "", 0, errors.New("bad base in $GENERATE")
+    switch base {
+    case "o", "d", "x", "X":
+    default:
+        return "", 0, "bad base in $GENERATE"
     }

-    offset, err := strconv.Atoi(xs[0])
-    if err != nil || offset > 255 {
-        return "", 0, errors.New("bad offset in $GENERATE")
+    offset, err := strconv.Atoi(offStr)
+    if err != nil {
+        return "", 0, "bad offset in $GENERATE"
     }

-    width, err := strconv.Atoi(xs[1])
-    if err != nil || width > 255 {
-        return "", offset, errors.New("bad width in $GENERATE")
+    width, err := strconv.Atoi(widthStr)
+    if err != nil || width < 0 || width > 255 {
+        return "", 0, "bad width in $GENERATE"
     }

-    switch {
-    case width < 0:
-        return "", offset, errors.New("bad width in $GENERATE")
-    case width == 0:
-        return "%" + xs[1] + xs[2], offset, nil
+    if width == 0 {
+        return "%" + base, offset, ""
     }

-    return "%0" + xs[1] + xs[2], offset, nil
+    return "%0" + widthStr + base, offset, ""
 }
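Editor's note: a hedged sketch (zone text, origin and file name are illustrative, not from the commit) of how the new ZoneParser path expands a $GENERATE directive through the generateReader sub-parser shown above.

package main

import (
    "fmt"
    "strings"

    "github.com/miekg/dns"
)

func main() {
    const zone = `$ORIGIN example.org.
$GENERATE 1-3 host-$ IN A 192.0.2.$
`
    zp := dns.NewZoneParser(strings.NewReader(zone), "", "illustrative.zone")
    for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
        fmt.Println(rr) // host-1, host-2, host-3 A records
    }
    if err := zp.Err(); err != nil {
        fmt.Println("parse error:", err)
    }
}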
-// +build go1.11,!windows
+// +build go1.11
+// +build aix darwin dragonfly freebsd linux netbsd openbsd

 package dns
...
-// +build !go1.11 windows
+// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd

 package dns
...
@@ -302,6 +302,12 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
     }
     // If we did compression and we find something add the pointer here
     if pointer != -1 {
+        // Clear the msg buffer after the pointer location, otherwise
+        // packDataNsec writes the wrong data to msg.
+        tainted := msg[nameoffset:off]
+        for i := range tainted {
+            tainted[i] = 0
+        }
         // We have two bytes (14 bits) to put the pointer in
         // if msg == nil, we will never do compression
         binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000))
@@ -367,12 +373,10 @@ Loop:
             var buf [3]byte
             bufs := strconv.AppendInt(buf[:0], int64(b), 10)
             s = append(s, '\\')
-            for i := 0; i < 3-len(bufs); i++ {
+            for i := len(bufs); i < 3; i++ {
                 s = append(s, '0')
             }
-            for _, r := range bufs {
-                s = append(s, r)
-            }
+            s = append(s, bufs...)
             // presentation-format \DDD escapes add 3 extra bytes
             maxLen += 3
         } else {
@@ -512,7 +516,7 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
     off = off0
     var s string
     for off < len(msg) && err == nil {
-        s, off, err = unpackTxtString(msg, off)
+        s, off, err = unpackString(msg, off)
         if err == nil {
             ss = append(ss, s)
         }
@@ -520,39 +524,6 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
     return
 }

-func unpackTxtString(msg []byte, offset int) (string, int, error) {
-    if offset+1 > len(msg) {
-        return "", offset, &Error{err: "overflow unpacking txt"}
-    }
-    l := int(msg[offset])
-    if offset+l+1 > len(msg) {
-        return "", offset, &Error{err: "overflow unpacking txt"}
-    }
-    s := make([]byte, 0, l)
-    for _, b := range msg[offset+1 : offset+1+l] {
-        switch b {
-        case '"', '\\':
-            s = append(s, '\\', b)
-        default:
-            if b < 32 || b > 127 { // unprintable
-                var buf [3]byte
-                bufs := strconv.AppendInt(buf[:0], int64(b), 10)
-                s = append(s, '\\')
-                for i := 0; i < 3-len(bufs); i++ {
-                    s = append(s, '0')
-                }
-                for _, r := range bufs {
-                    s = append(s, r)
-                }
-            } else {
-                s = append(s, b)
-            }
-        }
-    }
-    offset += 1 + l
-    return string(s), offset, nil
-}
-
 // Helpers for dealing with escaped bytes
 func isDigit(b byte) bool { return b >= '0' && b <= '9' }
@@ -560,6 +531,10 @@ func dddToByte(s []byte) byte {
     return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
 }

+func dddStringToByte(s string) byte {
+    return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
 // Helper function for packing and unpacking
 func intToBytes(i *big.Int, length int) []byte {
     buf := i.Bytes()
...
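Editor's note: a small, hedged round-trip sketch (names and strings are illustrative) touching the code paths changed above: packDomainName's compression-pointer path on the way out, unpackTxt/unpackString on the way back.

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeTXT) // illustrative name
    m.Compress = true                          // exercises the pointer path in packDomainName

    rr, err := dns.NewRR(`example.org. 3600 IN TXT "hello \"world\""`)
    if err != nil {
        panic(err)
    }
    m.Answer = append(m.Answer, rr)

    wire, err := m.Pack()
    if err != nil {
        panic(err)
    }

    var back dns.Msg
    if err := back.Unpack(wire); err != nil { // TXT rdata is decoded via unpackTxt/unpackString
        panic(err)
    }
    fmt.Println(back.Answer[0])
}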
@@ -6,7 +6,7 @@ import (
     "encoding/binary"
     "encoding/hex"
     "net"
-    "strconv"
+    "strings"
 )

 // helper functions called from the generated zmsg.go
@@ -267,29 +267,21 @@ func unpackString(msg []byte, off int) (string, int, error) {
     if off+l+1 > len(msg) {
         return "", off, &Error{err: "overflow unpacking txt"}
     }
-    s := make([]byte, 0, l)
+    var s strings.Builder
+    s.Grow(l)
     for _, b := range msg[off+1 : off+1+l] {
-        switch b {
-        case '"', '\\':
-            s = append(s, '\\', b)
+        switch {
+        case b == '"' || b == '\\':
+            s.WriteByte('\\')
+            s.WriteByte(b)
+        case b < ' ' || b > '~': // unprintable
+            writeEscapedByte(&s, b)
         default:
-            if b < 32 || b > 127 { // unprintable
-                var buf [3]byte
-                bufs := strconv.AppendInt(buf[:0], int64(b), 10)
-                s = append(s, '\\')
-                for i := 0; i < 3-len(bufs); i++ {
-                    s = append(s, '0')
-                }
-                for _, r := range bufs {
-                    s = append(s, r)
-                }
-            } else {
-                s = append(s, b)
-            }
+            s.WriteByte(b)
         }
     }
     off += 1 + l
-    return string(s), off, nil
+    return s.String(), off, nil
 }

 func packString(s string, msg []byte, off int) (int, error) {
...
@@ -63,8 +63,10 @@ func (rr *NSEC3) Cover(name string) bool {
     }

     nextHash := rr.NextDomain
-    if ownerHash == nextHash { // empty interval
-        return false
+
+    // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash
+    if ownerHash == nextHash && nameHash != ownerHash { // empty interval
+        return true
     }
+
     if ownerHash > nextHash { // end of zone
         if nameHash > ownerHash { // covered since there is nothing after ownerHash
...
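Editor's note: a hedged illustration of the changed Cover semantics; the owner/next hash labels below are placeholders rather than real NSEC3 digests.

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    nsec3 := &dns.NSEC3{
        Hdr:        dns.RR_Header{Name: "39p91242oslggest5e6a7cci4iaeqvnk.example.org.", Rrtype: dns.TypeNSEC3, Class: dns.ClassINET, Ttl: 3600},
        Hash:       dns.SHA1,
        NextDomain: "39P91242OSLGGEST5E6A7CCI4IAEQVNK", // equals the owner hash label: an empty interval
    }

    // The old code returned false for every empty interval; with the change
    // above, a name whose hash differs from the owner hash is now reported
    // as covered.
    fmt.Println(nsec3.Cover("a.example.org."))
}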
@@ -105,7 +105,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
         return rr, off, err
     }

-    setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+    setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) {
         rr := mkPrivateRR(h.Rrtype)
         rr.Hdr = h
@@ -115,7 +115,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
         for {
             // TODO(miek): we could also be returning _QUOTE, this might or might not
             // be an issue (basically parsing TXT becomes hard)
-            switch l = <-c; l.value {
+            switch l, _ = c.Next(); l.value {
             case zNewline, zEOF:
                 break Fetch
             case zString:
@@ -134,7 +134,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
     typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
 }

-// PrivateHandleRemove removes defenitions required to support private RR type.
+// PrivateHandleRemove removes definitions required to support private RR type.
 func PrivateHandleRemove(rtype uint16) {
     rtypestr, ok := TypeToString[rtype]
     if ok {
@@ -144,5 +144,4 @@ func PrivateHandleRemove(rtype uint16) {
         delete(StringToType, rtypestr)
         delete(typeToUnpack, rtype)
     }
-    return
 }
package dns
// Implement a simple scanner, return a byte stream from an io reader.
import (
"bufio"
"context"
"io"
"text/scanner"
)
type scan struct {
src *bufio.Reader
position scanner.Position
eof bool // Have we just seen a eof
ctx context.Context
}
func scanInit(r io.Reader) (*scan, context.CancelFunc) {
s := new(scan)
s.src = bufio.NewReader(r)
s.position.Line = 1
ctx, cancel := context.WithCancel(context.Background())
s.ctx = ctx
return s, cancel
}
// tokenText returns the next byte from the input
func (s *scan) tokenText() (byte, error) {
c, err := s.src.ReadByte()
if err != nil {
return c, err
}
select {
case <-s.ctx.Done():
return c, context.Canceled
default:
break
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if s.eof == true {
s.position.Line++
s.position.Column = 0
s.eof = false
}
if c == '\n' {
s.eof = true
return c, nil
}
s.position.Column++
return c, nil
}
package dns
import (
"strings"
"sync"
)
// ServeMux is an DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns add calls
// the handler for the pattern that most closely matches the zone name.
//
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
// redirected to the parent zone (if that is also registered), otherwise
// the child gets the query.
//
// ServeMux is also safe for concurrent access from multiple goroutines.
//
// The zero ServeMux is empty and ready for use.
type ServeMux struct {
z map[string]Handler
m sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
if mux.z == nil {
return nil
}
var handler Handler
// TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
// lands in a go release, replace the following with strings.ToLower.
var sb strings.Builder
for i := 0; i < len(q); i++ {
c := q[i]
if !(c >= 'A' && c <= 'Z') {
continue
}
sb.Grow(len(q))
sb.WriteString(q[:i])
for ; i < len(q); i++ {
c := q[i]
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A'
}
sb.WriteByte(c)
}
q = sb.String()
break
}
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
if h, ok := mux.z[q[off:]]; ok {
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
if mux.z == nil {
mux.z = make(map[string]Handler)
}
mux.z[Fqdn(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, Fqdn(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose pattern most
// closely matches the request message.
//
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
// are redirected to the parent zone (if that is also registered),
// otherwise the child gets the query.
//
// If no handler is found, or there is no question, a standard SERVFAIL
// message is returned
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
var h Handler
if len(req.Question) >= 1 { // allow more than one question
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
}
if h != nil {
h.ServeDNS(w, req)
} else {
HandleFailed(w, req)
}
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
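Editor's note: a hedged usage sketch of the ServeMux added above, wired through the package-level helpers at the end of the file; the zone name, listen address and reply contents are illustrative.

package main

import (
    "log"

    "github.com/miekg/dns"
)

func main() {
    // Handlers registered on DefaultServeMux are matched per zone, most
    // specific suffix first, exactly as ServeMux.match describes.
    dns.HandleFunc("example.org.", func(w dns.ResponseWriter, req *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(req)
        w.WriteMsg(m)
    })
    dns.HandleFunc(".", func(w dns.ResponseWriter, req *dns.Msg) {
        dns.HandleFailed(w, req) // SERVFAIL for everything else
    })

    srv := &dns.Server{Addr: "127.0.0.1:5353", Net: "udp"} // illustrative listen address
    log.Fatal(srv.ListenAndServe())
}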
@@ -127,8 +127,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
         if offset+1 >= buflen {
             continue
         }
-        var rdlen uint16
-        rdlen = binary.BigEndian.Uint16(buf[offset:])
+        rdlen := binary.BigEndian.Uint16(buf[offset:])
         offset += 2
         offset += int(rdlen)
     }
...
@@ -419,128 +419,130 @@ type TXT struct {
 func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }

 func sprintName(s string) string {
-    src := []byte(s)
-    dst := make([]byte, 0, len(src))
-    for i := 0; i < len(src); {
-        if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
-            dst = append(dst, src[i:i+2]...)
+    var dst strings.Builder
+    dst.Grow(len(s))
+    for i := 0; i < len(s); {
+        if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+            dst.WriteString(s[i : i+2])
             i += 2
-        } else {
-            b, n := nextByte(src, i)
-            if n == 0 {
-                i++ // dangling back slash
-            } else if b == '.' {
-                dst = append(dst, b)
-            } else {
-                dst = appendDomainNameByte(dst, b)
-            }
-            i += n
+            continue
         }
+
+        b, n := nextByte(s, i)
+        switch {
+        case n == 0:
+            i++ // dangling back slash
+        case b == '.':
+            dst.WriteByte('.')
+        default:
+            writeDomainNameByte(&dst, b)
+        }
+        i += n
     }
-    return string(dst)
+    return dst.String()
 }

 func sprintTxtOctet(s string) string {
-    src := []byte(s)
-    dst := make([]byte, 0, len(src))
-    dst = append(dst, '"')
-    for i := 0; i < len(src); {
-        if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
-            dst = append(dst, src[i:i+2]...)
+    var dst strings.Builder
+    dst.Grow(2 + len(s))
+    dst.WriteByte('"')
+    for i := 0; i < len(s); {
+        if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+            dst.WriteString(s[i : i+2])
             i += 2
-        } else {
-            b, n := nextByte(src, i)
-            if n == 0 {
-                i++ // dangling back slash
-            } else if b == '.' {
-                dst = append(dst, b)
-            } else {
-                if b < ' ' || b > '~' {
-                    dst = appendByte(dst, b)
-                } else {
-                    dst = append(dst, b)
-                }
-            }
-            i += n
+            continue
+        }
+
+        b, n := nextByte(s, i)
+        switch {
+        case n == 0:
+            i++ // dangling back slash
+        case b == '.':
+            dst.WriteByte('.')
+        case b < ' ' || b > '~':
+            writeEscapedByte(&dst, b)
+        default:
+            dst.WriteByte(b)
         }
+        i += n
     }
-    dst = append(dst, '"')
-    return string(dst)
+    dst.WriteByte('"')
+    return dst.String()
 }

 func sprintTxt(txt []string) string {
-    var out []byte
+    var out strings.Builder
     for i, s := range txt {
+        out.Grow(3 + len(s))
         if i > 0 {
-            out = append(out, ` "`...)
+            out.WriteString(` "`)
         } else {
-            out = append(out, '"')
+            out.WriteByte('"')
         }
-        bs := []byte(s)
-        for j := 0; j < len(bs); {
-            b, n := nextByte(bs, j)
+        for j := 0; j < len(s); {
+            b, n := nextByte(s, j)
             if n == 0 {
                 break
             }
-            out = appendTXTStringByte(out, b)
+            writeTXTStringByte(&out, b)
             j += n
         }
-        out = append(out, '"')
+        out.WriteByte('"')
     }
-    return string(out)
+    return out.String()
 }

-func appendDomainNameByte(s []byte, b byte) []byte {
+func writeDomainNameByte(s *strings.Builder, b byte) {
     switch b {
     case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
-        return append(s, '\\', b)
+        s.WriteByte('\\')
+        s.WriteByte(b)
+    default:
+        writeTXTStringByte(s, b)
     }
-    return appendTXTStringByte(s, b)
 }

-func appendTXTStringByte(s []byte, b byte) []byte {
-    switch b {
-    case '"', '\\':
-        return append(s, '\\', b)
+func writeTXTStringByte(s *strings.Builder, b byte) {
+    switch {
+    case b == '"' || b == '\\':
+        s.WriteByte('\\')
+        s.WriteByte(b)
+    case b < ' ' || b > '~':
+        writeEscapedByte(s, b)
+    default:
+        s.WriteByte(b)
     }
-    if b < ' ' || b > '~' {
-        return appendByte(s, b)
-    }
-    return append(s, b)
 }

-func appendByte(s []byte, b byte) []byte {
+func writeEscapedByte(s *strings.Builder, b byte) {
     var buf [3]byte
     bufs := strconv.AppendInt(buf[:0], int64(b), 10)
-    s = append(s, '\\')
-    for i := 0; i < 3-len(bufs); i++ {
-        s = append(s, '0')
-    }
-    for _, r := range bufs {
-        s = append(s, r)
+    s.WriteByte('\\')
+    for i := len(bufs); i < 3; i++ {
+        s.WriteByte('0')
     }
-    return s
+    s.Write(bufs)
 }

-func nextByte(b []byte, offset int) (byte, int) {
-    if offset >= len(b) {
+func nextByte(s string, offset int) (byte, int) {
+    if offset >= len(s) {
         return 0, 0
     }
-    if b[offset] != '\\' {
+    if s[offset] != '\\' {
         // not an escape sequence
-        return b[offset], 1
+        return s[offset], 1
     }
-    switch len(b) - offset {
+    switch len(s) - offset {
     case 1: // dangling escape
         return 0, 0
     case 2, 3: // too short to be \ddd
     default: // maybe \ddd
-        if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) {
-            return dddToByte(b[offset+1:]), 4
+        if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
+            return dddStringToByte(s[offset+1:]), 4
         }
     }
     // not \ddd, just an RFC 1035 "quoted" character
-    return b[offset+1], 2
+    return s[offset+1], 2
 }

 // SPF RR. See RFC 4408, Section 3.1.1.
...
-// +build !windows
-
 package dns

 import (
...
// +build windows
package dns
import "net"
// SessionUDP holds the remote address
type SessionUDP struct {
raddr *net.UDPAddr
}
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
n, raddr, err := conn.ReadFrom(b)
if err != nil {
return n, nil, err
}
session := &SessionUDP{raddr.(*net.UDPAddr)}
return n, session, err
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, err := conn.WriteTo(b, session.raddr)
return n, err
}
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
// use the standard method in udp.go for these.
func setUDPSocketOptions(*net.UDPConn) error { return nil }
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
@@ -3,7 +3,7 @@ package dns
 import "fmt"

 // Version is current version of this library.
-var Version = V{1, 0, 8}
+var Version = V{1, 0, 15}

 // V holds the version of this library.
 type V struct {
...
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish // import "golang.org/x/crypto/blowfish"
// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
if k := len(key); k < 1 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(&result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
}
initCipher(&result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
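Editor's note: a minimal, hedged sketch of the vendored blowfish API in use; the key and plaintext are made up, and block-mode chaining is out of scope here, as the Encrypt doc above warns.

package main

import (
    "fmt"

    "golang.org/x/crypto/blowfish"
)

func main() {
    c, err := blowfish.NewCipher([]byte("illustrative key")) // key must be 1 to 56 bytes
    if err != nil {
        panic(err)
    }

    src := []byte("8bytes!!") // exactly one 8-byte block
    dst := make([]byte, blowfish.BlockSize)
    c.Encrypt(dst, src)

    back := make([]byte, blowfish.BlockSize)
    c.Decrypt(back, dst)
    fmt.Printf("%x -> %s\n", dst, back)
}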
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
#define REDMASK51 0x0007FFFFFFFFFFFF
curlimage:
docker build -t gohttp2/curl .