Commit 30217a4c authored by Miek Gieben's avatar Miek Gieben Committed by GitHub

Drop caddy from vendor (#700)

* Removed caddy

* new stuff

* Now need to go get caddy

* Duh
parent 18bc52b5
...@@ -22,8 +22,8 @@ ...@@ -22,8 +22,8 @@
[[projects]] [[projects]]
name = "github.com/Shopify/sarama" name = "github.com/Shopify/sarama"
packages = ["."] packages = ["."]
revision = "0fb560e5f7fbcaee2f75e3c34174320709f69944" revision = "c01858abb625b73a3af51d0798e4ad42c8147093"
version = "v1.11.0" version = "v1.12.0"
[[projects]] [[projects]]
name = "github.com/apache/thrift" name = "github.com/apache/thrift"
...@@ -46,14 +46,14 @@ ...@@ -46,14 +46,14 @@
[[projects]] [[projects]]
name = "github.com/coreos/etcd" name = "github.com/coreos/etcd"
packages = ["client","pkg/pathutil","pkg/types"] packages = ["client","pkg/pathutil","pkg/types"]
revision = "e5b7ee2d03627ca33201da428b8110ef7c3e95f1" revision = "d267ca9c184e953554257d0acdd1dc9c47d38229"
version = "v3.1.6" version = "v3.1.8"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/coreos/go-oidc" name = "github.com/coreos/go-oidc"
packages = ["http","jose","key","oauth2","oidc"] packages = ["http","jose","key","oauth2","oidc"]
revision = "5157aa730c25a7531d4b99483e6a440d4ab735a0" revision = "c797a55f1c1001ec3169f1d0fbb4c5523563bec6"
[[projects]] [[projects]]
name = "github.com/coreos/pkg" name = "github.com/coreos/pkg"
...@@ -97,12 +97,6 @@ ...@@ -97,12 +97,6 @@
revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594" revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
version = "v1.2" version = "v1.2"
[[projects]]
branch = "master"
name = "github.com/flynn/go-shlex"
packages = ["."]
revision = "3f9db97f856818214da2e1057f8ad84803971cff"
[[projects]] [[projects]]
name = "github.com/fsnotify/fsnotify" name = "github.com/fsnotify/fsnotify"
packages = ["."] packages = ["."]
...@@ -143,7 +137,7 @@ ...@@ -143,7 +137,7 @@
branch = "master" branch = "master"
name = "github.com/go-openapi/swag" name = "github.com/go-openapi/swag"
packages = ["."] packages = ["."]
revision = "24ebf76d720bab64f62824d76bced3184a65490d" revision = "e43299b4afa7bc7f22e5e82e3d48607230e4c177"
[[projects]] [[projects]]
name = "github.com/gogo/protobuf" name = "github.com/gogo/protobuf"
...@@ -160,8 +154,8 @@ ...@@ -160,8 +154,8 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto"] packages = ["proto","ptypes/any"]
revision = "2bba0603135d7d7f5cb73b2125beeda19c09f4ef" revision = "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
[[projects]] [[projects]]
branch = "master" branch = "master"
...@@ -185,7 +179,7 @@ ...@@ -185,7 +179,7 @@
branch = "master" branch = "master"
name = "github.com/grpc-ecosystem/grpc-opentracing" name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"] packages = ["go/otgrpc"]
revision = "ef7d6c8df7564c20c0a19ed9737f6d67990c61fa" revision = "6c130eed1e297e1aa4d415a50c90d0c81c52677e"
[[projects]] [[projects]]
branch = "master" branch = "master"
...@@ -215,13 +209,7 @@ ...@@ -215,13 +209,7 @@
branch = "master" branch = "master"
name = "github.com/juju/ratelimit" name = "github.com/juju/ratelimit"
packages = ["."] packages = ["."]
revision = "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72" revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
[[projects]]
name = "github.com/klauspost/crc32"
packages = ["."]
revision = "cb6bfca970f6908083f26f39a79009d608efd5cd"
version = "v1.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
...@@ -233,7 +221,7 @@ ...@@ -233,7 +221,7 @@
branch = "master" branch = "master"
name = "github.com/mailru/easyjson" name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"] packages = ["buffer","jlexer","jwriter"]
revision = "3f09c2282fc5ad74b3d04a485311f3173c2431d3" revision = "44c0351a5bc860bcb2608d54aa03ea686c4e7b25"
[[projects]] [[projects]]
name = "github.com/matttproud/golang_protobuf_extensions" name = "github.com/matttproud/golang_protobuf_extensions"
...@@ -241,23 +229,17 @@ ...@@ -241,23 +229,17 @@
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
name = "github.com/mholt/caddy"
packages = [".","caddyfile","startupshutdown"]
revision = "27785f7993319b615acae4ff2a33cdb36fb68376"
version = "v0.10.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/miekg/dns" name = "github.com/miekg/dns"
packages = ["."] packages = ["."]
revision = "113c7538ea6d8f429071f901bd26af59cc9676fe" revision = "fb6fbed0f5ec4e418de4f156c18d2e4f9bc854e7"
[[projects]] [[projects]]
name = "github.com/opentracing/opentracing-go" name = "github.com/opentracing/opentracing-go"
packages = [".","ext","log"] packages = [".","ext","log"]
revision = "6edb48674bd9467b8e91fda004f2bd7202d60ce4" revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.1" version = "v1.0.2"
[[projects]] [[projects]]
name = "github.com/openzipkin/zipkin-go-opentracing" name = "github.com/openzipkin/zipkin-go-opentracing"
...@@ -272,10 +254,10 @@ ...@@ -272,10 +254,10 @@
version = "v1.0" version = "v1.0"
[[projects]] [[projects]]
branch = "master"
name = "github.com/pierrec/lz4" name = "github.com/pierrec/lz4"
packages = ["."] packages = ["."]
revision = "f5b77fd73d83122495309c0f459b810f83cc291f" revision = "5c9560bfa9ace2bf86080bf40d46b34ae44604df"
version = "v1.0"
[[projects]] [[projects]]
name = "github.com/pierrec/xxHash" name = "github.com/pierrec/xxHash"
...@@ -305,7 +287,7 @@ ...@@ -305,7 +287,7 @@
branch = "master" branch = "master"
name = "github.com/prometheus/procfs" name = "github.com/prometheus/procfs"
packages = [".","xfs"] packages = [".","xfs"]
revision = "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad" revision = "65c1f6f8f0fc1e2185eb9863a3bc751496404259"
[[projects]] [[projects]]
branch = "master" branch = "master"
...@@ -317,7 +299,7 @@ ...@@ -317,7 +299,7 @@
branch = "master" branch = "master"
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
packages = ["."] packages = ["."]
revision = "f1d95a35e132e8a1868023a08932b14f0b8b8fcb" revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
[[projects]] [[projects]]
branch = "master" branch = "master"
...@@ -329,31 +311,31 @@ ...@@ -329,31 +311,31 @@
branch = "master" branch = "master"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["ssh/terminal"] packages = ["ssh/terminal"]
revision = "c7af5bf2638a1164f2eb5467c39c6cffbd13a02e" revision = "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "da118f7b8e5954f39d0d2130ab35d4bf0e3cb344" revision = "3da985ce5951d99de868be4385f21ea6c2b22f24"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/oauth2" name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"] packages = [".","google","internal","jws","jwt"]
revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" revision = "f047394b6d14284165300fd82dad67edb3a4d7f6"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5" revision = "b90f89a1e7a9c1f6b918820b3daa7f08488c8594"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"] packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
revision = "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4" revision = "4ee4af566555f5fbe026368b75596286a312663a"
[[projects]] [[projects]]
name = "google.golang.org/appengine" name = "google.golang.org/appengine"
...@@ -361,11 +343,17 @@ ...@@ -361,11 +343,17 @@
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "aa2eb687b4d3e17154372564ad8d6bf11c3cf21f"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [".","codes","credentials","grpclog","internal","keepalive","metadata","naming","peer","stats","tap","transport"] packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d" revision = "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
version = "v1.2.1" version = "v1.3.0"
[[projects]] [[projects]]
name = "gopkg.in/inf.v0" name = "gopkg.in/inf.v0"
...@@ -388,6 +376,6 @@ ...@@ -388,6 +376,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "9c7d469cf5543db6f5df55c0e72b1223fc15df4ef1242861e6981d2527eed2c9" inputs-digest = "a59be89c7a92b81f951b47efd072c2af04fb66b4f2b7a780cadb7466559d0d6b"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
## "ignored" lists a set of packages (not projects) that are ignored when ## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project, ## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency. ## or in a dependency.
# ignored = ["github.com/user/project/badpkg"] ignored = ["github.com/mholt/caddy", "github.com/mholt/caddy/caddyfile", "github.com/mholt/caddy/startupshutdown"]
## Dependencies define constraints on dependent projects. They are respected by ## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency. ## dep whether coming from the Gopkg.toml of the current project or a dependency.
......
...@@ -7,11 +7,11 @@ all: coredns ...@@ -7,11 +7,11 @@ all: coredns
# Phony this to ensure we always build the binary. # Phony this to ensure we always build the binary.
# TODO: Add .go file dependencies. # TODO: Add .go file dependencies.
.PHONY: coredns .PHONY: coredns
coredns: check coredns: check caddy
go build $(BUILD_VERBOSE) -ldflags="-s -w" go build $(BUILD_VERBOSE) -ldflags="-s -w"
.PHONY: deps .PHONY: deps
deps: core/zmiddleware.go core/dnsserver/zdirectives.go deps: core/zmiddleware.go core/dnsserver/zdirectives.go caddy
go get -u github.com/golang/lint/golint go get -u github.com/golang/lint/golint
.PHONY: check .PHONY: check
...@@ -25,6 +25,10 @@ test: check ...@@ -25,6 +25,10 @@ test: check
testk8s: check testk8s: check
go test -race $(TEST_VERBOSE) -tags=k8s -run 'TestKubernetes' ./test ./middleware/kubernetes/... go test -race $(TEST_VERBOSE) -tags=k8s -run 'TestKubernetes' ./test ./middleware/kubernetes/...
.PHONY: caddy
caddy:
go get github.com/mholt/caddy
.PHONY: coverage .PHONY: coverage
coverage: check coverage: check
set -e -x set -e -x
......
language: go language: go
go: go:
- 1.6.3
- 1.7.3 - 1.7.3
- 1.8
env: env:
global: global:
...@@ -12,8 +12,7 @@ env: ...@@ -12,8 +12,7 @@ env:
- DEBUG=true - DEBUG=true
matrix: matrix:
- KAFKA_VERSION=0.9.0.1 - KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.0.1 - KAFKA_VERSION=0.10.2.0
- KAFKA_VERSION=0.10.1.0
before_install: before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} - export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
......
# Changelog # Changelog
#### Version 1.12.0 (2017-05-08)
New Features:
- Added support for the `ApiVersions` request and response pair, and Kafka
version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
that you still need to specify the Kafka version in the Sarama configuration
for the time being.
- Added a `Brokers` method to the Client which returns the complete set of
active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- Added an `InSyncReplicas` method to the Client which returns the set of all
in-sync broker IDs for the given partition, now that the Kafka versions for
which this was misleading are no longer in our supported set
([#872](https://github.com/Shopify/sarama/pull/872)).
- Added a `NewCustomHashPartitioner` method which allows constructing a hash
partitioner with a custom hash method in case the default (FNV-1a) is not
suitable
([#837](https://github.com/Shopify/sarama/pull/837),
[#841](https://github.com/Shopify/sarama/pull/841)).
Improvements:
- Recognize more Kafka error codes
([#859](https://github.com/Shopify/sarama/pull/859)).
Bug Fixes:
- Fix an issue where decoding a malformed FetchRequest would not return the
correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- Respect ordering of group protocols in JoinGroupRequests. This fix is
transparent if you're using the `AddGroupProtocol` or
`AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
([#812](https://github.com/Shopify/sarama/issues/812)).
- Fix an alignment-related issue with atomics on 32-bit architectures
([#859](https://github.com/Shopify/sarama/pull/859)).
#### Version 1.11.0 (2016-12-20) #### Version 1.11.0 (2016-12-20)
_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer
which led to unexpected values and data races.
New Features: New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature - Metrics! Thanks to Sébastien Launay for all his work on this feature
([#701](https://github.com/Shopify/sarama/pull/701), ([#701](https://github.com/Shopify/sarama/pull/701),
......
...@@ -13,12 +13,14 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa ...@@ -13,12 +13,14 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa
- The [examples](./examples) directory contains more elaborate example applications. - The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
### Compatibility and API stability ### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support grace period for older releases. This means we currently officially support
Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are
still likely to work. still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service. Sarama follows semantic versioning and provides API stability via the gopkg.in service.
...@@ -27,7 +29,7 @@ A changelog is available [here](CHANGELOG.md). ...@@ -27,7 +29,7 @@ A changelog is available [here](CHANGELOG.md).
### Contributing ### Contributing
* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md). * Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more * Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
technical and design details. technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) * The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
......
...@@ -50,12 +50,13 @@ func (r *ApiVersionsResponse) encode(pe packetEncoder) error { ...@@ -50,12 +50,13 @@ func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
} }
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength() numBlocks, err := pd.getArrayLength()
if err != nil { if err != nil {
return err return err
......
...@@ -17,24 +17,23 @@ import ( ...@@ -17,24 +17,23 @@ import (
// scope. // scope.
type AsyncProducer interface { type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer, flushing any messages it may // AsyncClose triggers a shutdown of the producer. The shutdown has completed
// have buffered. The shutdown has completed when both the Errors and Successes // when both the Errors and Successes channels have been closed. When calling
// channels have been closed. When calling AsyncClose, you *must* continue to // AsyncClose, you *must* continue to read from those channels in order to
// read from those channels in order to drain the results of any messages in // drain the results of any messages in flight.
// flight.
AsyncClose() AsyncClose()
// Close shuts down the producer and flushes any messages it may have buffered. // Close shuts down the producer and waits for any buffered messages to be
// You must call this function before a producer object passes out of scope, as // flushed. You must call this function before a producer object passes out of
// it may otherwise leak memory. You must call this before calling Close on the // scope, as it may otherwise leak memory. You must call this before calling
// underlying client. // Close on the underlying client.
Close() error Close() error
// Input is the input channel for the user to write messages to that they // Input is the input channel for the user to write messages to that they
// wish to send. // wish to send.
Input() chan<- *ProducerMessage Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when AckSuccesses is // Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the // enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages // Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement. // together in a single select statement.
...@@ -200,7 +199,7 @@ func (p *asyncProducer) Close() error { ...@@ -200,7 +199,7 @@ func (p *asyncProducer) Close() error {
if p.conf.Producer.Return.Successes { if p.conf.Producer.Return.Successes {
go withRecover(func() { go withRecover(func() {
for _ = range p.successes { for range p.successes {
} }
}) })
} }
......
...@@ -18,7 +18,7 @@ func closeProducer(t *testing.T, p AsyncProducer) { ...@@ -18,7 +18,7 @@ func closeProducer(t *testing.T, p AsyncProducer) {
wg.Add(2) wg.Add(2)
go func() { go func() {
for _ = range p.Successes() { for range p.Successes() {
t.Error("Unexpected message on Successes()") t.Error("Unexpected message on Successes()")
} }
wg.Done() wg.Done()
...@@ -808,7 +808,7 @@ func ExampleAsyncProducer_goroutines() { ...@@ -808,7 +808,7 @@ func ExampleAsyncProducer_goroutines() {
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()
for _ = range producer.Successes() { for range producer.Successes() {
successes++ successes++
} }
}() }()
......
...@@ -52,7 +52,7 @@ type responsePromise struct { ...@@ -52,7 +52,7 @@ type responsePromise struct {
errors chan error errors chan error
} }
// NewBroker creates and returns a Broker targetting the given host:port address. // NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that. // This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker { func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr} return &Broker{id: -1, addr: addr}
...@@ -355,6 +355,17 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups ...@@ -355,6 +355,17 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups
return response, nil return response, nil
} }
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock() b.lock.Lock()
defer b.lock.Unlock() defer b.lock.Unlock()
......
...@@ -284,6 +284,19 @@ var brokerTestTable = []struct { ...@@ -284,6 +284,19 @@ var brokerTestTable = []struct {
t.Error("DescribeGroups request got no response!") t.Error("DescribeGroups request got no response!")
} }
}}, }},
{"ApiVersionsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ApiVersionsRequest{}
response, err := broker.ApiVersions(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("ApiVersions request got no response!")
}
}},
} }
func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) { func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
......
...@@ -17,6 +17,9 @@ type Client interface { ...@@ -17,6 +17,9 @@ type Client interface {
// altered after it has been created. // altered after it has been created.
Config() *Config Config() *Config
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
// Topics returns the set of available topics as retrieved from cluster metadata. // Topics returns the set of available topics as retrieved from cluster metadata.
Topics() ([]string, error) Topics() ([]string, error)
...@@ -35,6 +38,11 @@ type Client interface { ...@@ -35,6 +38,11 @@ type Client interface {
// Replicas returns the set of all replica IDs for the given partition. // Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error) Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the // RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh // available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics. // metadata for all topics.
...@@ -133,7 +141,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { ...@@ -133,7 +141,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
} }
// do an initial fetch of all cluster metadata by specifing an empty list of topics // do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata() err := client.RefreshMetadata()
switch err { switch err {
case nil: case nil:
...@@ -157,6 +165,16 @@ func (client *client) Config() *Config { ...@@ -157,6 +165,16 @@ func (client *client) Config() *Config {
return client.conf return client.conf
} }
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) Close() error { func (client *client) Close() error {
if client.Closed() { if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved // Chances are this is being called from a defer() and the error will go unobserved
...@@ -282,6 +300,31 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) ...@@ -282,6 +300,31 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
return dupeAndSort(metadata.Replicas), nil return dupeAndSort(metadata.Replicas), nil
} }
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupeAndSort(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() { if client.Closed() {
return nil, ErrClosedClient return nil, ErrClosedClient
...@@ -592,12 +635,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) ...@@ -592,12 +635,12 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
switch err.(type) { switch err.(type) {
case nil: case nil:
// valid response, use it // valid response, use it
if shouldRetry, err := client.updateMetadata(response); shouldRetry { shouldRetry, err := client.updateMetadata(response)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless") Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil return retry(err) // note: err can be nil
} else {
return err
} }
return err
case PacketEncodingError: case PacketEncodingError:
// didn't even send, return the error // didn't even send, return the error
......
...@@ -196,6 +196,17 @@ func TestClientMetadata(t *testing.T) { ...@@ -196,6 +196,17 @@ func TestClientMetadata(t *testing.T) {
t.Error("Incorrect (or unsorted) replica") t.Error("Incorrect (or unsorted) replica")
} }
isr, err = client.InSyncReplicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if len(isr) != 2 {
t.Error("Client returned incorrect ISRs for partition:", isr)
} else if isr[0] != 1 {
t.Error("Incorrect (or unsorted) ISR:", isr)
} else if isr[1] != 5 {
t.Error("Incorrect (or unsorted) ISR:", isr)
}
leader.Close() leader.Close()
seedBroker.Close() seedBroker.Close()
safeClose(t, client) safeClose(t, client)
......
...@@ -305,10 +305,13 @@ func (c *Config) Validate() error { ...@@ -305,10 +305,13 @@ func (c *Config) Validate() error {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
} }
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
} }
if c.Producer.Flush.Bytes >= int(MaxRequestSize) { if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
} }
if c.Producer.Timeout%time.Millisecond != 0 { if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
......
...@@ -289,10 +289,11 @@ type PartitionConsumer interface { ...@@ -289,10 +289,11 @@ type PartitionConsumer interface {
} }
type partitionConsumer struct { type partitionConsumer struct {
consumer *consumer highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
conf *Config consumer *consumer
topic string conf *Config
partition int32 topic string
partition int32
broker *brokerConsumer broker *brokerConsumer
messages chan *ConsumerMessage messages chan *ConsumerMessage
...@@ -302,9 +303,8 @@ type partitionConsumer struct { ...@@ -302,9 +303,8 @@ type partitionConsumer struct {
trigger, dying chan none trigger, dying chan none
responseResult error responseResult error
fetchSize int32 fetchSize int32
offset int64 offset int64
highWaterMarkOffset int64
} }
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
...@@ -324,7 +324,7 @@ func (child *partitionConsumer) sendError(err error) { ...@@ -324,7 +324,7 @@ func (child *partitionConsumer) sendError(err error) {
} }
func (child *partitionConsumer) dispatcher() { func (child *partitionConsumer) dispatcher() {
for _ = range child.trigger { for range child.trigger {
select { select {
case <-child.dying: case <-child.dying:
close(child.trigger) close(child.trigger)
...@@ -411,7 +411,7 @@ func (child *partitionConsumer) Close() error { ...@@ -411,7 +411,7 @@ func (child *partitionConsumer) Close() error {
child.AsyncClose() child.AsyncClose()
go withRecover(func() { go withRecover(func() {
for _ = range child.messages { for range child.messages {
// drain // drain
} }
}) })
......
...@@ -51,7 +51,7 @@ func TestConsumerGroupMemberAssignment(t *testing.T) { ...@@ -51,7 +51,7 @@ func TestConsumerGroupMemberAssignment(t *testing.T) {
amt := &ConsumerGroupMemberAssignment{ amt := &ConsumerGroupMemberAssignment{
Version: 1, Version: 1,
Topics: map[string][]int32{ Topics: map[string][]int32{
"one": []int32{0, 2, 4}, "one": {0, 2, 4},
}, },
UserData: []byte{0x01, 0x02, 0x03}, UserData: []byte{0x01, 0x02, 0x03},
} }
......
...@@ -2,8 +2,7 @@ package sarama ...@@ -2,8 +2,7 @@ package sarama
import ( import (
"encoding/binary" "encoding/binary"
"hash/crc32"
"github.com/klauspost/crc32"
) )
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
......
...@@ -89,12 +89,13 @@ func (gd *GroupDescription) encode(pe packetEncoder) error { ...@@ -89,12 +89,13 @@ func (gd *GroupDescription) encode(pe packetEncoder) error {
} }
func (gd *GroupDescription) decode(pd packetDecoder) (err error) { func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
gd.Err = KError(kerr)
} }
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil { if gd.GroupId, err = pd.getString(); err != nil {
return return
} }
......
name: sarama name: sarama
up: up:
- go: 1.7.3 - go:
version: '1.8'
commands: commands:
test: test:
......
...@@ -108,12 +108,20 @@ const ( ...@@ -108,12 +108,20 @@ const (
ErrUnsupportedSASLMechanism KError = 33 ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34 ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35 ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43 ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
) )
func (err KError) Error() string { func (err KError) Error() string {
// Error messages stolen/adapted from // Error messages stolen/adapted from
// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol // https://kafka.apache.org/protocol#protocol_error_codes
switch err { switch err {
case ErrNoError: case ErrNoError:
return "kafka server: Not an error, why are you printing me?" return "kafka server: Not an error, why are you printing me?"
...@@ -189,8 +197,24 @@ func (err KError) Error() string { ...@@ -189,8 +197,24 @@ func (err KError) Error() string {
return "kafka server: Request is not valid given the current SASL state." return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion: case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported." return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat: case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version." return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
} }
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
......
...@@ -92,7 +92,7 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { ...@@ -92,7 +92,7 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
} }
fetchBlock := &fetchRequestBlock{} fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil { if err = fetchBlock.decode(pd); err != nil {
return nil return err
} }
r.blocks[topic][partition] = fetchBlock r.blocks[topic][partition] = fetchBlock
} }
......
...@@ -10,11 +10,11 @@ func (r *HeartbeatResponse) encode(pe packetEncoder) error { ...@@ -10,11 +10,11 @@ func (r *HeartbeatResponse) encode(pe packetEncoder) error {
} }
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
return nil return nil
} }
......
package sarama package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct { type JoinGroupRequest struct {
GroupId string GroupId string
SessionTimeout int32 SessionTimeout int32
MemberId string MemberId string
ProtocolType string ProtocolType string
GroupProtocols map[string][]byte GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
} }
func (r *JoinGroupRequest) encode(pe packetEncoder) error { func (r *JoinGroupRequest) encode(pe packetEncoder) error {
...@@ -20,16 +45,31 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { ...@@ -20,16 +45,31 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
return err return err
} }
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { if len(r.GroupProtocols) > 0 {
return err if len(r.OrderedGroupProtocols) > 0 {
} return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
for name, metadata := range r.GroupProtocols { }
if err := pe.putString(name); err != nil {
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err return err
} }
if err := pe.putBytes(metadata); err != nil { for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err return err
} }
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
} }
return nil return nil
...@@ -62,16 +102,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { ...@@ -62,16 +102,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.GroupProtocols = make(map[string][]byte) r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
name, err := pd.getString() protocol := &GroupProtocol{}
if err != nil { if err := protocol.decode(pd); err != nil {
return err
}
metadata, err := pd.getBytes()
if err != nil {
return err return err
} }
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.GroupProtocols[name] = metadata r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
} }
return nil return nil
...@@ -90,11 +126,10 @@ func (r *JoinGroupRequest) requiredVersion() KafkaVersion { ...@@ -90,11 +126,10 @@ func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
} }
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
if r.GroupProtocols == nil { r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
r.GroupProtocols = make(map[string][]byte) Name: name,
} Metadata: metadata,
})
r.GroupProtocols[name] = metadata
} }
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
......
...@@ -23,19 +23,35 @@ var ( ...@@ -23,19 +23,35 @@ var (
) )
func TestJoinGroupRequest(t *testing.T) { func TestJoinGroupRequest(t *testing.T) {
var request *JoinGroupRequest request := new(JoinGroupRequest)
request = new(JoinGroupRequest)
request.GroupId = "TestGroup" request.GroupId = "TestGroup"
request.SessionTimeout = 100 request.SessionTimeout = 100
request.ProtocolType = "consumer" request.ProtocolType = "consumer"
testRequest(t, "no protocols", request, joinGroupRequestNoProtocols) testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
}
func TestJoinGroupRequestOneProtocol(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
testRequestDecode(t, "one protocol", request, packet)
}
request = new(JoinGroupRequest) func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup" request.GroupId = "TestGroup"
request.SessionTimeout = 100 request.SessionTimeout = 100
request.MemberId = "OneProtocol" request.MemberId = "OneProtocol"
request.ProtocolType = "consumer" request.ProtocolType = "consumer"
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
testRequest(t, "one protocol", request, joinGroupRequestOneProtocol) testRequestDecode(t, "one protocol", request, packet)
} }
...@@ -53,12 +53,13 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error { ...@@ -53,12 +53,13 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error {
} }
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil { if r.GenerationId, err = pd.getInt32(); err != nil {
return return
} }
......
...@@ -10,11 +10,11 @@ func (r *LeaveGroupResponse) encode(pe packetEncoder) error { ...@@ -10,11 +10,11 @@ func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
} }
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
return nil return nil
} }
......
...@@ -24,12 +24,13 @@ func (r *ListGroupsResponse) encode(pe packetEncoder) error { ...@@ -24,12 +24,13 @@ func (r *ListGroupsResponse) encode(pe packetEncoder) error {
} }
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
n, err := pd.getArrayLength() n, err := pd.getArrayLength()
if err != nil { if err != nil {
return err return err
......
package sarama package sarama
import ( import (
"runtime"
"testing" "testing"
"time" "time"
) )
...@@ -24,6 +25,17 @@ var ( ...@@ -24,6 +25,17 @@ var (
0x08, 0x08,
0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyGzipMessage18 = []byte{
132, 99, 80, 148, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
// value
0x00, 0x00, 0x00, 0x17,
0x1f, 0x8b,
0x08,
0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyLZ4Message = []byte{ emptyLZ4Message = []byte{
132, 219, 238, 101, // CRC 132, 219, 238, 101, // CRC
0x01, // version byte 0x01, // version byte
...@@ -79,7 +91,11 @@ func TestMessageEncoding(t *testing.T) { ...@@ -79,7 +91,11 @@ func TestMessageEncoding(t *testing.T) {
message.Value = []byte{} message.Value = []byte{}
message.Codec = CompressionGZIP message.Codec = CompressionGZIP
testEncodable(t, "empty gzip", &message, emptyGzipMessage) if runtime.Version() == "go1.8" {
testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
} else {
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
}
message.Value = []byte{} message.Value = []byte{}
message.Codec = CompressionLZ4 message.Codec = CompressionLZ4
......
...@@ -87,6 +87,18 @@ type hashPartitioner struct { ...@@ -87,6 +87,18 @@ type hashPartitioner struct {
hasher hash.Hash32 hasher hash.Hash32
} }
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher.
// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = hasher()
return p
}
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a // NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, // random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulus the number of partitions. This ensures that messages with the same key always end up on the // modulus the number of partitions. This ensures that messages with the same key always end up on the
......
...@@ -2,6 +2,7 @@ package sarama ...@@ -2,6 +2,7 @@ package sarama
import ( import (
"crypto/rand" "crypto/rand"
"hash/fnv"
"log" "log"
"testing" "testing"
) )
...@@ -70,6 +71,55 @@ func TestRoundRobinPartitioner(t *testing.T) { ...@@ -70,6 +71,55 @@ func TestRoundRobinPartitioner(t *testing.T) {
} }
} }
func TestNewHashPartitionerWithHasher(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
buf := make([]byte, 256)
for i := 1; i < 50; i++ {
if _, err := rand.Read(buf); err != nil {
t.Error(err)
}
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
}
}
func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
msg := ProducerMessage{}
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
// which is -2147483648 or int32's min value
msg.Key = StringEncoder("1468509572224")
choice, err := partitioner.Partition(&msg, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
func TestHashPartitioner(t *testing.T) { func TestHashPartitioner(t *testing.T) {
partitioner := NewHashPartitioner("mytopic") partitioner := NewHashPartitioner("mytopic")
......
...@@ -76,11 +76,12 @@ func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { ...@@ -76,11 +76,12 @@ func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
} }
if r.Version >= 1 { if r.Version >= 1 {
if millis, err := pd.getInt32(); err != nil { millis, err := pd.getInt32()
if err != nil {
return err return err
} else {
r.ThrottleTime = time.Duration(millis) * time.Millisecond
} }
r.ThrottleTime = time.Duration(millis) * time.Millisecond
} }
return nil return nil
......
...@@ -204,11 +204,12 @@ func (rd *realDecoder) getStringArray() ([]string, error) { ...@@ -204,11 +204,12 @@ func (rd *realDecoder) getStringArray() ([]string, error) {
ret := make([]string, n) ret := make([]string, n)
for i := range ret { for i := range ret {
if str, err := rd.getString(); err != nil { str, err := rd.getString()
if err != nil {
return nil, err return nil, err
} else {
ret[i] = str
} }
ret[i] = str
} }
return ret, nil return ret, nil
} }
......
...@@ -50,7 +50,11 @@ func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in [] ...@@ -50,7 +50,11 @@ func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []
} }
func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
// Encoder request packet := testRequestEncode(t, name, rb, expected)
testRequestDecode(t, name, rb, packet)
}
func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
req := &request{correlationID: 123, clientID: "foo", body: rb} req := &request{correlationID: 123, clientID: "foo", body: rb}
packet, err := encode(req, nil) packet, err := encode(req, nil)
headerSize := 14 + len("foo") headerSize := 14 + len("foo")
...@@ -59,7 +63,10 @@ func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { ...@@ -59,7 +63,10 @@ func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
} else if !bytes.Equal(packet[headerSize:], expected) { } else if !bytes.Equal(packet[headerSize:], expected) {
t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
} }
// Decoder request return packet
}
func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
decoded, n, err := decodeRequest(bytes.NewReader(packet)) decoded, n, err := decodeRequest(bytes.NewReader(packet))
if err != nil { if err != nil {
t.Error("Failed to decode request", err) t.Error("Failed to decode request", err)
......
...@@ -11,13 +11,13 @@ func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { ...@@ -11,13 +11,13 @@ func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
} }
func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
var err error r.Err = KError(kerr)
if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
return err return err
} }
......
...@@ -17,12 +17,13 @@ func (r *SyncGroupResponse) encode(pe packetEncoder) error { ...@@ -17,12 +17,13 @@ func (r *SyncGroupResponse) encode(pe packetEncoder) error {
} }
func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
if kerr, err := pd.getInt16(); err != nil { kerr, err := pd.getInt16()
if err != nil {
return err return err
} else {
r.Err = KError(kerr)
} }
r.Err = KError(kerr)
r.MemberAssignment, err = pd.getBytes() r.MemberAssignment, err = pd.getBytes()
return return
} }
......
...@@ -25,10 +25,10 @@ type SyncProducer interface { ...@@ -25,10 +25,10 @@ type SyncProducer interface {
// SendMessages will return an error. // SendMessages will return an error.
SendMessages(msgs []*ProducerMessage) error SendMessages(msgs []*ProducerMessage) error
// Close shuts down the producer and flushes any messages it may have buffered. // Close shuts down the producer and waits for any buffered messages to be
// You must call this function before a producer object passes out of scope, as // flushed. You must call this function before a producer object passes out of
// it may otherwise leak memory. You must call this before calling Close on the // scope, as it may otherwise leak memory. You must call this before calling
// underlying client. // Close on the underlying client.
Close() error Close() error
} }
......
...@@ -76,17 +76,17 @@ func TestSyncProducerBatch(t *testing.T) { ...@@ -76,17 +76,17 @@ func TestSyncProducerBatch(t *testing.T) {
} }
err = producer.SendMessages([]*ProducerMessage{ err = producer.SendMessages([]*ProducerMessage{
&ProducerMessage{ {
Topic: "my_topic", Topic: "my_topic",
Value: StringEncoder(TestMessage), Value: StringEncoder(TestMessage),
Metadata: "test", Metadata: "test",
}, },
&ProducerMessage{ {
Topic: "my_topic", Topic: "my_topic",
Value: StringEncoder(TestMessage), Value: StringEncoder(TestMessage),
Metadata: "test", Metadata: "test",
}, },
&ProducerMessage{ {
Topic: "my_topic", Topic: "my_topic",
Value: StringEncoder(TestMessage), Value: StringEncoder(TestMessage),
Metadata: "test", Metadata: "test",
......
...@@ -148,5 +148,6 @@ var ( ...@@ -148,5 +148,6 @@ var (
V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
minVersion = V0_8_2_0 minVersion = V0_8_2_0
) )
...@@ -78,7 +78,7 @@ That's it! etcd is now running and serving client requests. For more ...@@ -78,7 +78,7 @@ That's it! etcd is now running and serving client requests. For more
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd [iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
### Running a local etcd cluster ### Running a local etcd cluster
...@@ -136,5 +136,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin ...@@ -136,5 +136,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin
### License ### License
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
// +build ignore
// This file is used to generate keys for tests.
package main
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"text/template"
jose "gopkg.in/square/go-jose.v2"
)
type key struct {
name string
new func() (crypto.Signer, error)
}
var keys = []key{
{
"ECDSA_256", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
},
},
{
"ECDSA_384", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
},
},
{
"ECDSA_521", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
},
},
{
"RSA_1024", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 1024)
},
},
{
"RSA_2048", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 2048)
},
},
{
"RSA_4096", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 4096)
},
},
}
func newJWK(k key, prefix, ident string) (privBytes, pubBytes []byte, err error) {
priv, err := k.new()
if err != nil {
return nil, nil, fmt.Errorf("generate %s: %v", k.name, err)
}
pub := priv.Public()
privKey := &jose.JSONWebKey{Key: priv}
thumbprint, err := privKey.Thumbprint(crypto.SHA256)
if err != nil {
return nil, nil, fmt.Errorf("computing thumbprint: %v", err)
}
keyID := hex.EncodeToString(thumbprint)
privKey.KeyID = keyID
pubKey := &jose.JSONWebKey{Key: pub, KeyID: keyID}
privBytes, err = json.MarshalIndent(privKey, prefix, ident)
if err != nil {
return
}
pubBytes, err = json.MarshalIndent(pubKey, prefix, ident)
return
}
type keyData struct {
Name string
Priv string
Pub string
}
var tmpl = template.Must(template.New("").Parse(`// +build !golint
// This file contains statically created JWKs for tests created by gen.go
package oidc
import (
"encoding/json"
jose "gopkg.in/square/go-jose.v2"
)
func mustLoadJWK(s string) jose.JSONWebKey {
var jwk jose.JSONWebKey
if err := json.Unmarshal([]byte(s), &jwk); err != nil {
panic(err)
}
return jwk
}
var (
{{- range $i, $key := .Keys }}
testKey{{ $key.Name }} = mustLoadJWK(` + "`" + `{{ $key.Pub }}` + "`" + `)
testKey{{ $key.Name }}_Priv = mustLoadJWK(` + "`" + `{{ $key.Priv }}` + "`" + `)
{{ end -}}
)
`))
func main() {
var tmplData struct {
Keys []keyData
}
for _, k := range keys {
for i := 0; i < 4; i++ {
log.Printf("generating %s", k.name)
priv, pub, err := newJWK(k, "\t", "\t")
if err != nil {
log.Fatal(err)
}
name := fmt.Sprintf("%s_%d", k.name, i)
tmplData.Keys = append(tmplData.Keys, keyData{
Name: name,
Priv: string(priv),
Pub: string(pub),
})
}
}
buff := new(bytes.Buffer)
if err := tmpl.Execute(buff, tmplData); err != nil {
log.Fatalf("excuting template: %v", err)
}
if err := ioutil.WriteFile("jose_test.go", buff.Bytes(), 0644); err != nil {
log.Fatal(err)
}
}
This diff is collapsed.
...@@ -3,6 +3,7 @@ package oidc ...@@ -3,6 +3,7 @@ package oidc
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
...@@ -38,147 +39,158 @@ type remoteKeySet struct { ...@@ -38,147 +39,158 @@ type remoteKeySet struct {
// guard all other fields // guard all other fields
mu sync.Mutex mu sync.Mutex
// inflightCtx suppresses parallel execution of updateKeys and allows // inflight suppresses parallel execution of updateKeys and allows
// multiple goroutines to wait for its result. // multiple goroutines to wait for its result.
// Its Err() method returns any errors encountered during updateKeys. inflight *inflight
//
// If nil, there is no inflight updateKeys request.
inflightCtx *inflight
// A set of cached keys and their expiry. // A set of cached keys and their expiry.
cachedKeys []jose.JSONWebKey cachedKeys []jose.JSONWebKey
expiry time.Time expiry time.Time
} }
// inflight is used to wait on some in-flight request from multiple goroutines // inflight is used to wait on some in-flight request from multiple goroutines.
type inflight struct { type inflight struct {
done chan struct{} doneCh chan struct{}
keys []jose.JSONWebKey
err error err error
} }
// Done returns a channel that is closed when the inflight request finishes. func newInflight() *inflight {
func (i *inflight) Done() <-chan struct{} { return &inflight{doneCh: make(chan struct{})}
return i.done
} }
// Err returns any error encountered during request execution. May be nil. // wait returns a channel that multiple goroutines can receive on. Once it returns
func (i *inflight) Err() error { // a value, the inflight request is done and result() can be inspected.
return i.err func (i *inflight) wait() <-chan struct{} {
return i.doneCh
} }
// Cancel signals completion of the inflight request with error err. // done can only be called by a single goroutine. It records the result of the
// Must be called only once for particular inflight instance. // inflight request and signals other goroutines that the result is safe to
func (i *inflight) Cancel(err error) { // inspect.
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
i.keys = keys
i.err = err i.err = err
close(i.done) close(i.doneCh)
} }
func (r *remoteKeySet) keysWithIDFromCache(keyIDs []string) ([]jose.JSONWebKey, bool) { // result cannot be called until the wait() channel has returned a value.
r.mu.Lock() func (i *inflight) result() ([]jose.JSONWebKey, error) {
keys, expiry := r.cachedKeys, r.expiry return i.keys, i.err
r.mu.Unlock() }
// Have the keys expired? func (r *remoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
if expiry.Add(keysExpiryDelta).Before(r.now()) { // We don't support JWTs signed with multiple signatures.
return nil, false keyID := ""
for _, sig := range jws.Signatures {
keyID = sig.Header.KeyID
break
} }
var signingKeys []jose.JSONWebKey keys, expiry := r.keysFromCache()
// Don't check expiry yet. This optimizes for when the provider is unavailable.
for _, key := range keys { for _, key := range keys {
if contains(keyIDs, key.KeyID) { if keyID == "" || key.KeyID == keyID {
signingKeys = append(signingKeys, key) if payload, err := jws.Verify(&key); err == nil {
return payload, nil
}
} }
} }
if len(signingKeys) == 0 { if !r.now().Add(keysExpiryDelta).After(expiry) {
// Are the keys about to expire? // Keys haven't expired, don't refresh.
if r.now().Add(keysExpiryDelta).After(expiry) { return nil, errors.New("failed to verify id token signature")
return nil, false
}
} }
return signingKeys, true keys, err := r.keysFromRemote(ctx)
} if err != nil {
func (r *remoteKeySet) keysWithID(ctx context.Context, keyIDs []string) ([]jose.JSONWebKey, error) { return nil, fmt.Errorf("fetching keys %v", err)
keys, ok := r.keysWithIDFromCache(keyIDs)
if ok {
return keys, nil
} }
var inflightCtx *inflight for _, key := range keys {
func() { if keyID == "" || key.KeyID == keyID {
r.mu.Lock() if payload, err := jws.Verify(&key); err == nil {
defer r.mu.Unlock() return payload, nil
}
// If there's not a current inflight request, create one.
if r.inflightCtx == nil {
inflightCtx := &inflight{make(chan struct{}), nil}
r.inflightCtx = inflightCtx
go func() {
// TODO(ericchiang): Upstream Kubernetes request that we recover every time
// we spawn a goroutine, because panics in a goroutine will bring down the
// entire program. There's no way to recover from another goroutine's panic.
//
// Most users actually want to let the panic propagate and bring down the
// program because it implies some unrecoverable state.
//
// Add a context key to allow the recover behavior.
//
// See: https://github.com/coreos/go-oidc/issues/89
// Sync keys and close inflightCtx when that's done.
// Use the remoteKeySet's context instead of the requests context
// because a re-sync is unique to the keys set and will span multiple
// requests.
inflightCtx.Cancel(r.updateKeys(r.ctx))
r.mu.Lock()
defer r.mu.Unlock()
r.inflightCtx = nil
}()
} }
}
return nil, errors.New("failed to verify id token signature")
}
func (r *remoteKeySet) keysFromCache() (keys []jose.JSONWebKey, expiry time.Time) {
r.mu.Lock()
defer r.mu.Unlock()
return r.cachedKeys, r.expiry
}
inflightCtx = r.inflightCtx // keysFromRemote syncs the key set from the remote set, records the values in the
}() // cache, and returns the key set.
func (r *remoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
// Need to lock to inspect the inflight request field.
r.mu.Lock()
// If there's not a current inflight request, create one.
if r.inflight == nil {
r.inflight = newInflight()
// This goroutine has exclusive ownership over the current inflight
// request. It releases the resource by nil'ing the inflight field
// once the goroutine is done.
go func() {
// Sync keys and finish inflight when that's done.
keys, expiry, err := r.updateKeys()
r.inflight.done(keys, err)
// Lock to update the keys and indicate that there is no longer an
// inflight request.
r.mu.Lock()
defer r.mu.Unlock()
if err == nil {
r.cachedKeys = keys
r.expiry = expiry
}
// Free inflight so a different request can run.
r.inflight = nil
}()
}
inflight := r.inflight
r.mu.Unlock()
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
case <-inflightCtx.Done(): case <-inflight.wait():
if err := inflightCtx.Err(); err != nil { return inflight.result()
return nil, err
}
} }
// Since we've just updated keys, we don't care about the cache miss.
keys, _ = r.keysWithIDFromCache(keyIDs)
return keys, nil
} }
func (r *remoteKeySet) updateKeys(ctx context.Context) error { func (r *remoteKeySet) updateKeys() ([]jose.JSONWebKey, time.Time, error) {
req, err := http.NewRequest("GET", r.jwksURL, nil) req, err := http.NewRequest("GET", r.jwksURL, nil)
if err != nil { if err != nil {
return fmt.Errorf("oidc: can't create request: %v", err) return nil, time.Time{}, fmt.Errorf("oidc: can't create request: %v", err)
} }
resp, err := doRequest(ctx, req) resp, err := doRequest(r.ctx, req)
if err != nil { if err != nil {
return fmt.Errorf("oidc: get keys failed %v", err) return nil, time.Time{}, fmt.Errorf("oidc: get keys failed %v", err)
} }
defer resp.Body.Close() defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body) body, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
return fmt.Errorf("oidc: read response body: %v", err) return nil, time.Time{}, fmt.Errorf("oidc: read response body: %v", err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body) return nil, time.Time{}, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
} }
var keySet jose.JSONWebKeySet var keySet jose.JSONWebKeySet
if err := json.Unmarshal(body, &keySet); err != nil { if err := json.Unmarshal(body, &keySet); err != nil {
return fmt.Errorf("oidc: failed to decode keys: %v %s", err, body) return nil, time.Time{}, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
} }
// If the server doesn't provide cache control headers, assume the // If the server doesn't provide cache control headers, assume the
...@@ -189,11 +201,5 @@ func (r *remoteKeySet) updateKeys(ctx context.Context) error { ...@@ -189,11 +201,5 @@ func (r *remoteKeySet) updateKeys(ctx context.Context) error {
if err == nil && e.After(expiry) { if err == nil && e.After(expiry) {
expiry = e expiry = e
} }
return keySet.Keys, expiry, nil
r.mu.Lock()
defer r.mu.Unlock()
r.cachedKeys = keySet.Keys
r.expiry = expiry
return nil
} }
package oidc package oidc
import ( import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/json" "encoding/json"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"reflect" "strconv"
"testing" "testing"
"time" "time"
"golang.org/x/net/context"
jose "gopkg.in/square/go-jose.v2" jose "gopkg.in/square/go-jose.v2"
) )
type keyServer struct { type keyServer struct {
keys jose.JSONWebKeySet keys jose.JSONWebKeySet
setHeaders func(h http.Header)
} }
func newKeyServer(keys ...jose.JSONWebKey) keyServer { func (k *keyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return keyServer{ if k.setHeaders != nil {
keys: jose.JSONWebKeySet{Keys: keys}, k.setHeaders(w.Header())
} }
}
func (k keyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err := json.NewEncoder(w).Encode(k.keys); err != nil { if err := json.NewEncoder(w).Encode(k.keys); err != nil {
panic(err) panic(err)
} }
} }
func TestKeysFormID(t *testing.T) { type signingKey struct {
tests := []struct { keyID string // optional
name string priv interface{}
keys []jose.JSONWebKey pub interface{}
keyIDs []string alg jose.SignatureAlgorithm
wantKeys []jose.JSONWebKey }
}{
{ // sign creates a JWS using the private key from the provided payload.
name: "single key", func (s *signingKey) sign(t *testing.T, payload []byte) string {
keys: []jose.JSONWebKey{ privKey := &jose.JSONWebKey{Key: s.priv, Algorithm: string(s.alg), KeyID: s.keyID}
testKeyRSA_2048_0,
testKeyECDSA_256_0, signer, err := jose.NewSigner(jose.SigningKey{Algorithm: s.alg, Key: privKey}, nil)
}, if err != nil {
keyIDs: []string{ t.Fatal(err)
testKeyRSA_2048_0.KeyID, }
}, jws, err := signer.Sign(payload)
wantKeys: []jose.JSONWebKey{ if err != nil {
testKeyRSA_2048_0, t.Fatal(err)
}, }
},
{ data, err := jws.CompactSerialize()
name: "one key id matches", if err != nil {
keys: []jose.JSONWebKey{ t.Fatal(err)
testKeyRSA_2048_0, }
testKeyECDSA_256_0, return data
}, }
keyIDs: []string{
testKeyRSA_2048_0.KeyID, // jwk returns the public part of the signing key.
testKeyRSA_2048_1.KeyID, func (s *signingKey) jwk() jose.JSONWebKey {
}, return jose.JSONWebKey{Key: s.pub, Use: "sig", Algorithm: string(s.alg), KeyID: s.keyID}
wantKeys: []jose.JSONWebKey{ }
testKeyRSA_2048_0,
}, func newRSAKey(t *testing.T) *signingKey {
priv, err := rsa.GenerateKey(rand.Reader, 1028)
if err != nil {
t.Fatal(err)
}
return &signingKey{"", priv, priv.Public(), jose.RS256}
}
// newECDSAKey generates a fresh P-256 ECDSA signing key for tests,
// failing the test immediately if key generation errors.
func newECDSAKey(t *testing.T) *signingKey {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	return &signingKey{keyID: "", priv: key, pub: key.Public(), alg: jose.ES256}
}
// TestRSAVerify checks that an RSA-signed payload verifies against the
// matching published key and is rejected when signed by an unrelated key.
func TestRSAVerify(t *testing.T) {
	signer := newRSAKey(t)
	wrongKey := newRSAKey(t)
	testKeyVerify(t, signer, wrongKey, signer)
}
// TestECDSAVerify checks that an ECDSA-signed payload verifies against the
// matching published key and is rejected when signed by an unrelated key.
func TestECDSAVerify(t *testing.T) {
	signer := newECDSAKey(t)
	wrongKey := newECDSAKey(t)
	testKeyVerify(t, signer, wrongKey, signer)
}
// TestMultipleKeysVerify ensures verification succeeds when the signing key
// is one of several published keys, and fails for a key outside the set.
func TestMultipleKeysVerify(t *testing.T) {
	first := newRSAKey(t)
	second := newRSAKey(t)
	outsider := newECDSAKey(t)

	first.keyID = "key1"
	second.keyID = "key2"
	outsider.keyID = "key3"

	// Sign with the second published key; the ECDSA key is never published.
	testKeyVerify(t, second, outsider, first, second)
}
// TestMismatchedKeyID verifies that key selection honors the Key ID: a key
// whose material could confirm the signature must still be rejected when its
// advertised ID differs from the signing key's.
func TestMismatchedKeyID(t *testing.T) {
	first := newRSAKey(t)
	second := newRSAKey(t)

	// Shallow-copy first's key material into a separate struct so the two
	// share a private key but can carry different IDs.
	imposter := new(signingKey)
	*imposter = *first

	// The imposter is a valid key this time, but has a different Key ID.
	// It shouldn't match first because of the mismatched ID, even though
	// it would confirm the signature just fine.
	imposter.keyID = "key3"
	first.keyID = "key1"
	second.keyID = "key2"

	testKeyVerify(t, second, imposter, first, second)
}
// testKeyVerify publishes the "verification" keys on a test HTTP server,
// then checks that a token signed by good verifies — twice, so the second
// call exercises the key cache — and that a token signed by bad is rejected.
func testKeyVerify(t *testing.T, good, bad *signingKey, verification ...*signingKey) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var published jose.JSONWebKeySet
	for _, k := range verification {
		published.Keys = append(published.Keys, k.jwk())
	}

	secret := []byte("a secret")

	goodJWS, err := jose.ParseSigned(good.sign(t, secret))
	if err != nil {
		t.Fatal(err)
	}
	badJWS, err := jose.ParseSigned(bad.sign(t, secret))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(&keyServer{keys: published})
	defer srv.Close()

	rks := newRemoteKeySet(ctx, srv.URL, nil)

	// First verification: fetches keys from the remote server.
	got, err := rks.verify(ctx, goodJWS)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, secret) {
		t.Errorf("expected payload %s got %s", secret, got)
	}

	// Second verification: should be satisfied from the cache.
	got, err = rks.verify(ctx, goodJWS)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, secret) {
		t.Errorf("expected payload %s got %s", secret, got)
	}

	// A token signed by the wrong key must not verify.
	if _, err := rks.verify(context.Background(), badJWS); err == nil {
		t.Errorf("incorrectly verified signature")
	}
}
func TestCacheControl(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
key1 := newRSAKey(t)
key2 := newRSAKey(t)
key1.keyID = "key1"
key2.keyID = "key2"
payload := []byte("a secret")
jws1, err := jose.ParseSigned(key1.sign(t, payload))
if err != nil {
t.Fatal(err)
}
jws2, err := jose.ParseSigned(key2.sign(t, payload))
if err != nil {
t.Fatal(err)
}
cacheForSeconds := 1200
now := time.Now()
server := &keyServer{
keys: jose.JSONWebKeySet{
Keys: []jose.JSONWebKey{key1.jwk()},
}, },
{ setHeaders: func(h http.Header) {
name: "no valid keys", h.Set("Cache-Control", "max-age="+strconv.Itoa(cacheForSeconds))
keys: []jose.JSONWebKey{
testKeyRSA_2048_1,
testKeyECDSA_256_0,
},
keyIDs: []string{
testKeyRSA_2048_0.KeyID,
},
}, },
} }
s := httptest.NewServer(server)
defer s.Close()
rks := newRemoteKeySet(ctx, s.URL, func() time.Time { return now })
if _, err := rks.verify(ctx, jws1); err != nil {
t.Errorf("failed to verify valid signature: %v", err)
}
if _, err := rks.verify(ctx, jws2); err == nil {
t.Errorf("incorrectly verified signature")
}
t0 := time.Now() // Add second key to public list.
now := func() time.Time { return t0 } server.keys = jose.JSONWebKeySet{
Keys: []jose.JSONWebKey{key1.jwk(), key2.jwk()},
}
for _, test := range tests { if _, err := rks.verify(ctx, jws1); err != nil {
func() { t.Errorf("failed to verify valid signature: %v", err)
ctx, cancel := context.WithCancel(context.Background()) }
defer cancel() if _, err := rks.verify(ctx, jws2); err == nil {
t.Errorf("incorrectly verified signature, still within cache limit")
}
server := httptest.NewServer(newKeyServer(test.keys...)) // Move time forward. Remote key set should not query the remote server.
defer server.Close() now = now.Add(time.Duration(cacheForSeconds) * time.Second)
keySet := newRemoteKeySet(ctx, server.URL, now) if _, err := rks.verify(ctx, jws1); err != nil {
gotKeys, err := keySet.keysWithID(ctx, test.keyIDs) t.Errorf("failed to verify valid signature: %v", err)
if err != nil { }
t.Errorf("%s: %v", test.name, err) if _, err := rks.verify(ctx, jws2); err != nil {
return t.Errorf("failed to verify valid signature: %v", err)
} }
if !reflect.DeepEqual(gotKeys, test.wantKeys) {
t.Errorf("%s: expected keys=%#v, got=%#v", test.name, test.wantKeys, gotKeys) // Kill server and move time forward again. Keys should still verify.
} s.Close()
}() now = now.Add(time.Duration(cacheForSeconds) * time.Second)
if _, err := rks.verify(ctx, jws1); err != nil {
t.Errorf("failed to verify valid signature: %v", err)
}
if _, err := rks.verify(ctx, jws2); err != nil {
t.Errorf("failed to verify valid signature: %v", err)
} }
} }
...@@ -232,7 +232,8 @@ type IDToken struct { ...@@ -232,7 +232,8 @@ type IDToken struct {
// Initial nonce provided during the authentication redirect. // Initial nonce provided during the authentication redirect.
// //
// If present, this package ensures this is a valid nonce. // This package does NOT provided verification on the value of this field
// and it's the user's responsibility to ensure it contains a valid value.
Nonce string Nonce string
// Raw payload of the id_token. // Raw payload of the id_token.
...@@ -285,13 +286,6 @@ func (a *audience) UnmarshalJSON(b []byte) error { ...@@ -285,13 +286,6 @@ func (a *audience) UnmarshalJSON(b []byte) error {
return nil return nil
} }
func (a audience) MarshalJSON() ([]byte, error) {
if len(a) == 1 {
return json.Marshal(a[0])
}
return json.Marshal([]string(a))
}
type jsonTime time.Time type jsonTime time.Time
func (j *jsonTime) UnmarshalJSON(b []byte) error { func (j *jsonTime) UnmarshalJSON(b []byte) error {
...@@ -313,7 +307,3 @@ func (j *jsonTime) UnmarshalJSON(b []byte) error { ...@@ -313,7 +307,3 @@ func (j *jsonTime) UnmarshalJSON(b []byte) error {
*j = jsonTime(time.Unix(unix, 0)) *j = jsonTime(time.Unix(unix, 0))
return nil return nil
} }
func (j jsonTime) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Time(j).Unix())
}
...@@ -13,3 +13,4 @@ go test -v -i -race github.com/coreos/go-oidc/... ...@@ -13,3 +13,4 @@ go test -v -i -race github.com/coreos/go-oidc/...
go test -v -race github.com/coreos/go-oidc/... go test -v -race github.com/coreos/go-oidc/...
golint $LINTABLE golint $LINTABLE
go vet github.com/coreos/go-oidc/... go vet github.com/coreos/go-oidc/...
go build -v ./example/...
...@@ -19,9 +19,15 @@ const ( ...@@ -19,9 +19,15 @@ const (
issuerGoogleAccountsNoScheme = "accounts.google.com" issuerGoogleAccountsNoScheme = "accounts.google.com"
) )
// keySet is an interface that lets us stub out verification policies for
// testing. Outside of testing, it's always backed by a remoteKeySet.
type keySet interface {
verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error)
}
// IDTokenVerifier provides verification for ID Tokens. // IDTokenVerifier provides verification for ID Tokens.
type IDTokenVerifier struct { type IDTokenVerifier struct {
keySet *remoteKeySet keySet keySet
config *Config config *Config
issuer string issuer string
} }
...@@ -34,12 +40,6 @@ type Config struct { ...@@ -34,12 +40,6 @@ type Config struct {
// //
// If not provided, users must explicitly set SkipClientIDCheck. // If not provided, users must explicitly set SkipClientIDCheck.
ClientID string ClientID string
// Method to verify the ID Token nonce. If a nonce is present and this method
// is nil, users must explicitly set SkipNonceCheck.
//
// If the ID Token nonce is empty, for example if the client didn't provide a nonce in
// the initial redirect, this may be nil.
ClaimNonce func(nonce string) error
// If specified, only this set of algorithms may be used to sign the JWT. // If specified, only this set of algorithms may be used to sign the JWT.
// //
// Since many providers only support RS256, SupportedSigningAlgs defaults to this value. // Since many providers only support RS256, SupportedSigningAlgs defaults to this value.
...@@ -49,8 +49,6 @@ type Config struct { ...@@ -49,8 +49,6 @@ type Config struct {
SkipClientIDCheck bool SkipClientIDCheck bool
// If true, token expiry is not checked. // If true, token expiry is not checked.
SkipExpiryCheck bool SkipExpiryCheck bool
// If true, nonce claim is not checked. Must be true if ClaimNonce field is empty.
SkipNonceCheck bool
// Time function to check Token expiry. Defaults to time.Now // Time function to check Token expiry. Defaults to time.Now
Now func() time.Time Now func() time.Time
...@@ -65,7 +63,7 @@ func (p *Provider) Verifier(config *Config) *IDTokenVerifier { ...@@ -65,7 +63,7 @@ func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
return newVerifier(p.remoteKeySet, config, p.issuer) return newVerifier(p.remoteKeySet, config, p.issuer)
} }
func newVerifier(keySet *remoteKeySet, config *Config, issuer string) *IDTokenVerifier { func newVerifier(keySet keySet, config *Config, issuer string) *IDTokenVerifier {
// If SupportedSigningAlgs is empty defaults to only support RS256. // If SupportedSigningAlgs is empty defaults to only support RS256.
if len(config.SupportedSigningAlgs) == 0 { if len(config.SupportedSigningAlgs) == 0 {
config.SupportedSigningAlgs = []string{RS256} config.SupportedSigningAlgs = []string{RS256}
...@@ -102,6 +100,8 @@ func contains(sli []string, ele string) bool { ...@@ -102,6 +100,8 @@ func contains(sli []string, ele string) bool {
// Verify parses a raw ID Token, verifies it's been signed by the provider, preforms // Verify parses a raw ID Token, verifies it's been signed by the provider, preforms
// any additional checks depending on the Config, and returns the payload. // any additional checks depending on the Config, and returns the payload.
// //
// Verify does NOT do nonce validation, which is the callers responsibility.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation // See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
// //
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) // oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
...@@ -181,37 +181,22 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok ...@@ -181,37 +181,22 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
} }
} }
// If a set of required algorithms has been provided, ensure that the signatures use those. switch len(jws.Signatures) {
var keyIDs, gotAlgs []string case 0:
for _, sig := range jws.Signatures { return nil, fmt.Errorf("oidc: id token not signed")
if len(v.config.SupportedSigningAlgs) == 0 || contains(v.config.SupportedSigningAlgs, sig.Header.Algorithm) { case 1:
keyIDs = append(keyIDs, sig.Header.KeyID) default:
} else { return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
gotAlgs = append(gotAlgs, sig.Header.Algorithm)
}
}
if len(keyIDs) == 0 {
return nil, fmt.Errorf("oidc: no signatures use a supported algorithm, expected %q got %q", v.config.SupportedSigningAlgs, gotAlgs)
} }
// Get keys from the remote key set. This may trigger a re-sync. sig := jws.Signatures[0]
keys, err := v.keySet.keysWithID(ctx, keyIDs) if len(v.config.SupportedSigningAlgs) != 0 && !contains(v.config.SupportedSigningAlgs, sig.Header.Algorithm) {
if err != nil { return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", v.config.SupportedSigningAlgs, sig.Header.Algorithm)
return nil, fmt.Errorf("oidc: get keys for id token: %v", err)
}
if len(keys) == 0 {
return nil, fmt.Errorf("oidc: no keys match signature ID(s) %q", keyIDs)
} }
// Try to use a key to validate the signature. gotPayload, err := v.keySet.verify(ctx, jws)
var gotPayload []byte if err != nil {
for _, key := range keys { return nil, fmt.Errorf("failed to verify signature: %v", err)
if p, err := jws.Verify(&key); err == nil {
gotPayload = p
}
}
if len(gotPayload) == 0 {
return nil, fmt.Errorf("oidc: failed to verify id token")
} }
// Ensure that the payload returned by the square actually matches the payload parsed earlier. // Ensure that the payload returned by the square actually matches the payload parsed earlier.
...@@ -219,19 +204,6 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok ...@@ -219,19 +204,6 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload") return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
} }
// Check the nonce after we've verified the token. We don't want to allow unverified
// payloads to trigger a nonce lookup.
// If SkipNonceCheck is not set ClaimNonce cannot be Nil.
if !v.config.SkipNonceCheck && t.Nonce != "" {
if v.config.ClaimNonce != nil {
if err := v.config.ClaimNonce(t.Nonce); err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("oidc: Invalid configuration. ClaimNonce must be provided or SkipNonceCheck must be set.")
}
}
return t, nil return t, nil
} }
......
package oidc package oidc
import ( import (
"crypto/ecdsa" "context"
"crypto/elliptic" "strconv"
"crypto/rsa"
"encoding/json"
"net/http/httptest"
"testing" "testing"
"time" "time"
"golang.org/x/net/context"
jose "gopkg.in/square/go-jose.v2" jose "gopkg.in/square/go-jose.v2"
) )
// testVerifier is a keySet implementation backed by a single static JWK,
// letting verifier tests bypass the remote key-fetching machinery.
type testVerifier struct {
	// jwk is the public key every signature is checked against.
	jwk jose.JSONWebKey
}

// verify checks the JWS signature against the fixed key and returns the
// verified payload. The context is unused.
func (t *testVerifier) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
	return jws.Verify(&t.jwk)
}
func TestVerify(t *testing.T) { func TestVerify(t *testing.T) {
tests := []verificationTest{ tests := []verificationTest{
{ {
name: "good token", name: "good token",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
}, },
{ {
name: "invalid issuer", name: "invalid issuer",
idToken: idToken{ issuer: "https://bar",
Issuer: "foo", idToken: `{"iss":"https://foo"}`,
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
wantErr: true, wantErr: true,
}, },
{ {
name: "google accounts without scheme", name: "invalid sig",
issuer: "https://accounts.google.com", idToken: `{"iss":"https://foo"}`,
idToken: idToken{
Issuer: "accounts.google.com",
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0}, verificationKey: newRSAKey(t),
wantErr: true,
}, },
{ {
name: "expired token", name: "google accounts without scheme",
idToken: idToken{ issuer: "https://accounts.google.com",
Issuer: "https://foo", idToken: `{"iss":"accounts.google.com"}`,
Expiry: jsonTime(time.Now().Add(-time.Hour)), config: Config{
SkipClientIDCheck: true,
SkipExpiryCheck: true,
}, },
signKey: newRSAKey(t),
},
{
name: "expired token",
idToken: `{"iss":"https://foo","exp":` + strconv.FormatInt(time.Now().Add(-time.Hour).Unix(), 10) + `}`,
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
wantErr: true, wantErr: true,
}, },
{ {
name: "invalid signature", name: "unexpired token",
idToken: idToken{ idToken: `{"iss":"https://foo","exp":` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `}`,
Issuer: "https://foo", config: Config{
SkipClientIDCheck: true,
}, },
signKey: newRSAKey(t),
},
{
name: "expiry as float",
idToken: `{"iss":"https://foo","exp":` +
strconv.FormatFloat(float64(time.Now().Add(time.Hour).Unix()), 'E', -1, 64) +
`}`,
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_1},
wantErr: true,
}, },
} }
for _, test := range tests { for _, test := range tests {
test.run(t) t.Run(test.name, test.run)
} }
} }
func TestVerifyAudience(t *testing.T) { func TestVerifyAudience(t *testing.T) {
tests := []verificationTest{ tests := []verificationTest{
{ {
name: "good audience", name: "good audience",
idToken: idToken{ idToken: `{"iss":"https://foo","aud":"client1"}`,
Issuer: "https://foo",
Audience: []string{"client1"},
},
config: Config{ config: Config{
ClientID: "client1", ClientID: "client1",
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
}, },
{ {
name: "mismatched audience", name: "mismatched audience",
idToken: idToken{ idToken: `{"iss":"https://foo","aud":"client2"}`,
Issuer: "https://foo",
Audience: []string{"client2"},
},
config: Config{ config: Config{
ClientID: "client1", ClientID: "client1",
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
wantErr: true, wantErr: true,
}, },
{ {
name: "multiple audiences, one matches", name: "multiple audiences, one matches",
idToken: idToken{ idToken: `{"iss":"https://foo","aud":["client1","client2"]}`,
Issuer: "https://foo",
Audience: []string{"client2", "client1"},
},
config: Config{ config: Config{
ClientID: "client1", ClientID: "client2",
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
}, },
} }
for _, test := range tests { for _, test := range tests {
test.run(t) t.Run(test.name, test.run)
} }
} }
func TestVerifySigningAlg(t *testing.T) { func TestVerifySigningAlg(t *testing.T) {
tests := []verificationTest{ tests := []verificationTest{
{ {
name: "default signing alg", name: "default signing alg",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newRSAKey(t),
signAlg: RS256, // By default we only support RS256.
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
}, },
{ {
name: "bad signing alg", name: "bad signing alg",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signKey: testKeyRSA_2048_0_Priv, signKey: newECDSAKey(t),
signAlg: RS512,
pubKeys: []jose.JSONWebKey{testKeyRSA_2048_0},
wantErr: true, wantErr: true,
}, },
{ {
name: "ecdsa signing", name: "ecdsa signing",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SupportedSigningAlgs: []string{ES384}, SupportedSigningAlgs: []string{ES256},
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signAlg: ES384, signKey: newECDSAKey(t),
signKey: testKeyECDSA_384_0_Priv,
pubKeys: []jose.JSONWebKey{testKeyECDSA_384_0},
}, },
{ {
name: "one of many supported", name: "one of many supported",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
SupportedSigningAlgs: []string{RS256, ES384}, SupportedSigningAlgs: []string{RS256, ES256},
}, },
signAlg: ES384, signKey: newECDSAKey(t),
signKey: testKeyECDSA_384_0_Priv,
pubKeys: []jose.JSONWebKey{testKeyECDSA_384_0},
}, },
{ {
name: "not in requiredAlgs", name: "not in requiredAlgs",
idToken: idToken{ idToken: `{"iss":"https://foo"}`,
Issuer: "https://foo",
},
config: Config{ config: Config{
SupportedSigningAlgs: []string{RS256, ES512}, SupportedSigningAlgs: []string{RS256, ES512},
SkipClientIDCheck: true, SkipClientIDCheck: true,
SkipNonceCheck: true,
SkipExpiryCheck: true, SkipExpiryCheck: true,
}, },
signAlg: ES384, signKey: newECDSAKey(t),
signKey: testKeyECDSA_384_0_Priv,
pubKeys: []jose.JSONWebKey{testKeyECDSA_384_0},
wantErr: true, wantErr: true,
}, },
} }
for _, test := range tests { for _, test := range tests {
test.run(t) t.Run(test.name, test.run)
} }
} }
type verificationTest struct { type verificationTest struct {
// Name of the subtest.
name string name string
// if not provided defaults to "https://foo" // If not provided defaults to "https://foo"
issuer string issuer string
// ID token claims and a signing key to create the JWT. // JWT payload (just the claims).
idToken idToken idToken string
signKey jose.JSONWebKey
// If supplied use this signing algorithm. If not, guess
// from the signingKey.
signAlg string
config Config // Key to sign the ID Token with.
pubKeys []jose.JSONWebKey signKey *signingKey
// If not provided defaults to signKey. Only useful when
// testing invalid signatures.
verificationKey *signingKey
config Config
wantErr bool wantErr bool
} }
// algForKey infers the JOSE signature algorithm for a private key: RS256 for
// RSA keys, and the ES* variant matching the curve for ECDSA keys. The test
// is failed for unsupported curves or key types.
func algForKey(t *testing.T, k jose.JSONWebKey) string {
	switch key := k.Key.(type) {
	case *rsa.PrivateKey:
		return RS256
	case *ecdsa.PrivateKey:
		// Match on the curve's canonical name to pick the ES variant.
		name := key.PublicKey.Params().Name
		switch name {
		case elliptic.P256().Params().Name:
			return ES256
		case elliptic.P384().Params().Name:
			return ES384
		case elliptic.P521().Params().Name:
			return ES512
		}
		t.Fatalf("unsupported ecdsa curve: %s", name)
	default:
		t.Fatalf("unsupported key type %T", key)
	}
	// Unreachable in practice (t.Fatalf stops the test), but the compiler
	// still requires a return value on this path.
	return ""
}
func (v verificationTest) run(t *testing.T) { func (v verificationTest) run(t *testing.T) {
payload, err := json.Marshal(v.idToken) token := v.signKey.sign(t, []byte(v.idToken))
if err != nil {
t.Fatal(err)
}
signingAlg := v.signAlg
if signingAlg == "" {
signingAlg = algForKey(t, v.signKey)
}
signer, err := jose.NewSigner(jose.SigningKey{
Algorithm: jose.SignatureAlgorithm(signingAlg),
Key: &v.signKey,
}, nil)
if err != nil {
t.Fatal(err)
}
jws, err := signer.Sign(payload)
if err != nil {
t.Fatal(err)
}
token, err := jws.CompactSerialize()
if err != nil {
t.Fatal(err)
}
t0 := time.Now()
now := func() time.Time { return t0 }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
server := httptest.NewServer(newKeyServer(v.pubKeys...))
defer server.Close()
issuer := "https://foo" issuer := "https://foo"
if v.issuer != "" { if v.issuer != "" {
issuer = v.issuer issuer = v.issuer
} }
verifier := newVerifier(newRemoteKeySet(ctx, server.URL, now), &v.config, issuer) var ks keySet
if v.verificationKey == nil {
ks = &testVerifier{v.signKey.jwk()}
} else {
ks = &testVerifier{v.verificationKey.jwk()}
}
verifier := newVerifier(ks, &v.config, issuer)
if _, err := verifier.Verify(ctx, token); err != nil { if _, err := verifier.Verify(ctx, token); err != nil {
if !v.wantErr { if !v.wantErr {
......
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include $(GOROOT)/src/Make.inc
TARG=shlex
GOFILES=\
shlex.go\
include $(GOROOT)/src/Make.pkg
go-shlex is a simple lexer for go that supports shell-style quoting,
commenting, and escaping.
This diff is collapsed.
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shlex
import (
"strings"
"testing"
)
// checkError fails the test immediately when err is non-nil.
//
// This uses t.Fatal rather than t.Error deliberately: callers such as
// TestTokenizer and TestLexer go on to dereference values (tokenizer,
// foundToken, lexer) that are nil when the preceding call errored, so
// continuing after a reported failure would panic with a nil-pointer
// dereference instead of failing cleanly.
func checkError(err error, t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
}
// TestClassifier verifies that the default classifier assigns the expected
// rune-token type to a representative character of each class (plain char,
// space, escaping quote, non-escaping quote, comment marker).
func TestClassifier(t *testing.T) {
	classifier := NewDefaultClassifier()
	runeTests := map[int32]RuneTokenType{
		'a':  RUNETOKEN_CHAR,
		' ':  RUNETOKEN_SPACE,
		'"':  RUNETOKEN_ESCAPING_QUOTE,
		'\'': RUNETOKEN_NONESCAPING_QUOTE,
		'#':  RUNETOKEN_COMMENT}
	// The loop variable was previously named "rune", which shadows the
	// predeclared Go type of the same name; renamed to avoid the shadow.
	for char, expectedType := range runeTests {
		foundType := classifier.ClassifyRune(char)
		if foundType != expectedType {
			t.Logf("Expected type: %v for rune '%c'(%v). Found type: %v.", expectedType, char, char, foundType)
			t.Fail()
		}
	}
}
// TestTokenizer walks the tokenizer over an input mixing plain words,
// escaping ("...") and escaped inner quotes, an inline '#' that is part of
// a word, a trailing comment, and a newline, checking each emitted token's
// type and value in order.
func TestTokenizer(t *testing.T) {
	testInput := strings.NewReader("one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven")
	expectedTokens := []*Token{
		&Token{
			tokenType: TOKEN_WORD,
			value:     "one"},
		&Token{
			tokenType: TOKEN_WORD,
			value:     "two"},
		&Token{
			tokenType: TOKEN_WORD,
			value:     "three four"},
		&Token{
			tokenType: TOKEN_WORD,
			value:     "five \"six\""},
		// '#' inside a word does not start a comment.
		&Token{
			tokenType: TOKEN_WORD,
			value:     "seven#eight"},
		// A standalone '#' swallows the rest of the line as one comment.
		&Token{
			tokenType: TOKEN_COMMENT,
			value:     " nine # ten"},
		&Token{
			tokenType: TOKEN_WORD,
			value:     "eleven"}}
	tokenizer, err := NewTokenizer(testInput)
	checkError(err, t)
	for _, expectedToken := range expectedTokens {
		foundToken, err := tokenizer.NextToken()
		checkError(err, t)
		if !foundToken.Equal(expectedToken) {
			t.Error("Expected token:", expectedToken, ". Found:", foundToken)
		}
	}
}
// TestLexer reads a single word through the lexer and checks that it
// round-trips unchanged.
func TestLexer(t *testing.T) {
	const expectedWord = "one"
	lexer, err := NewLexer(strings.NewReader("one"))
	checkError(err, t)
	word, err := lexer.NextWord()
	checkError(err, t)
	if word != expectedWord {
		t.Error("Expected word:", expectedWord, ". Found:", word)
	}
}
// TestSplitSimple checks Split on plain whitespace-separated words.
func TestSplitSimple(t *testing.T) {
	want := []string{"one", "two", "three"}
	got, err := Split("one two three")
	if err != nil {
		t.Error("Split returned error:", err)
	}
	if len(want) != len(got) {
		t.Error("Split expected:", len(want), "results. Found:", len(got), "results")
	}
	for i, g := range got {
		if g != want[i] {
			t.Error("Item:", i, "(", g, ") differs from the expected value:", want[i])
		}
	}
}
// TestSplitEscapingQuotes checks that double-quoted groups — including
// non-ASCII text and ${...} sequences — survive Split as single tokens.
func TestSplitEscapingQuotes(t *testing.T) {
	want := []string{"one", "два ${three}", "four"}
	got, err := Split("one \"два ${three}\" four")
	if err != nil {
		t.Error("Split returned error:", err)
	}
	if len(want) != len(got) {
		t.Error("Split expected:", len(want), "results. Found:", len(got), "results")
	}
	for i, g := range got {
		if g != want[i] {
			t.Error("Item:", i, "(", g, ") differs from the expected value:", want[i])
		}
	}
}
// TestGlobbingExpressions checks that glob metacharacters (*, ?, [..])
// pass through Split unmodified rather than being expanded or dropped.
func TestGlobbingExpressions(t *testing.T) {
	testInput := "onefile *file one?ile onefil[de]"
	expectedOutput := []string{"onefile", "*file", "one?ile", "onefil[de]"}
	foundOutput, err := Split(testInput)
	if err != nil {
		// Message aligned with the sibling Split tests, which all use
		// "Split returned error:" (this one was missing the colon).
		t.Error("Split returned error:", err)
	}
	if len(expectedOutput) != len(foundOutput) {
		t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results")
	}
	for i := range foundOutput {
		if foundOutput[i] != expectedOutput[i] {
			t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i])
		}
	}
}
// TestSplitNonEscapingQuotes checks that single-quoted groups are kept
// together as one token with their contents taken literally.
func TestSplitNonEscapingQuotes(t *testing.T) {
	want := []string{"one", "два ${three}", "four"}
	got, err := Split("one 'два ${three}' four")
	if err != nil {
		t.Error("Split returned error:", err)
	}
	if len(want) != len(got) {
		t.Error("Split expected:", len(want), "results. Found:", len(got), "results")
	}
	for i, g := range got {
		if g != want[i] {
			t.Error("Item:", i, "(", g, ") differs from the expected value:", want[i])
		}
	}
}
...@@ -20,6 +20,9 @@ import ( ...@@ -20,6 +20,9 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"github.com/mailru/easyjson/jlexer"
"github.com/mailru/easyjson/jwriter"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
) )
...@@ -40,48 +43,150 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { ...@@ -40,48 +43,150 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) {
} }
func BytesToYAMLDoc(data []byte) (interface{}, error) { func BytesToYAMLDoc(data []byte) (interface{}, error) {
var document map[interface{}]interface{} var canary map[interface{}]interface{} // validate this is an object and not a different type
if err := yaml.Unmarshal(data, &document); err != nil { if err := yaml.Unmarshal(data, &canary); err != nil {
return nil, err return nil, err
} }
var document yaml.MapSlice // preserve order that is present in the document
if err := yaml.Unmarshal(data, &document); err != nil {
return nil, err
}
return document, nil return document, nil
} }
func transformData(in interface{}) (out interface{}, err error) { type JSONMapSlice []JSONMapItem
switch in.(type) {
func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
s.MarshalEasyJSON(w)
return w.BuildBytes()
}
func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) {
w.RawByte('{')
ln := len(s)
last := ln - 1
for i := 0; i < ln; i++ {
s[i].MarshalEasyJSON(w)
if i != last { // last item
w.RawByte(',')
}
}
w.RawByte('}')
}
func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
l := jlexer.Lexer{Data: data}
s.UnmarshalEasyJSON(&l)
return l.Error()
}
func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) {
if in.IsNull() {
in.Skip()
return
}
var result JSONMapSlice
in.Delim('{')
for !in.IsDelim('}') {
var mi JSONMapItem
mi.UnmarshalEasyJSON(in)
result = append(result, mi)
}
*s = result
}
type JSONMapItem struct {
Key string
Value interface{}
}
func (s JSONMapItem) MarshalJSON() ([]byte, error) {
w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
s.MarshalEasyJSON(w)
return w.BuildBytes()
}
func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) {
w.String(s.Key)
w.RawByte(':')
w.Raw(WriteJSON(s.Value))
}
func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) {
key := in.UnsafeString()
in.WantColon()
value := in.Interface()
in.WantComma()
s.Key = key
s.Value = value
}
func (s *JSONMapItem) UnmarshalJSON(data []byte) error {
l := jlexer.Lexer{Data: data}
s.UnmarshalEasyJSON(&l)
return l.Error()
}
func transformData(input interface{}) (out interface{}, err error) {
switch in := input.(type) {
case yaml.MapSlice:
o := make(JSONMapSlice, len(in))
for i, mi := range in {
var nmi JSONMapItem
switch k := mi.Key.(type) {
case string:
nmi.Key = k
case int:
nmi.Key = strconv.Itoa(k)
default:
return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key)
}
v, err := transformData(mi.Value)
if err != nil {
return nil, err
}
nmi.Value = v
o[i] = nmi
}
return o, nil
case map[interface{}]interface{}: case map[interface{}]interface{}:
o := make(map[string]interface{}) o := make(JSONMapSlice, 0, len(in))
for k, v := range in.(map[interface{}]interface{}) { for ke, va := range in {
sk := "" var nmi JSONMapItem
switch k.(type) { switch k := ke.(type) {
case string: case string:
sk = k.(string) nmi.Key = k
case int: case int:
sk = strconv.Itoa(k.(int)) nmi.Key = strconv.Itoa(k)
default: default:
return nil, fmt.Errorf("types don't match: expect map key string or int get: %T", k) return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke)
} }
v, err = transformData(v)
v, err := transformData(va)
if err != nil { if err != nil {
return nil, err return nil, err
} }
o[sk] = v nmi.Value = v
o = append(o, nmi)
} }
return o, nil return o, nil
case []interface{}: case []interface{}:
in1 := in.([]interface{}) len1 := len(in)
len1 := len(in1)
o := make([]interface{}, len1) o := make([]interface{}, len1)
for i := 0; i < len1; i++ { for i := 0; i < len1; i++ {
o[i], err = transformData(in1[i]) o[i], err = transformData(in[i])
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
return o, nil return o, nil
} }
return in, nil return input, nil
} }
// YAMLDoc loads a yaml document from either http or a file and converts it to json // YAMLDoc loads a yaml document from either http or a file and converts it to json
......
...@@ -21,6 +21,8 @@ import ( ...@@ -21,6 +21,8 @@ import (
"net/http/httptest" "net/http/httptest"
"testing" "testing"
yaml "gopkg.in/yaml.v2"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
...@@ -56,34 +58,35 @@ func TestLoadHTTPBytes(t *testing.T) { ...@@ -56,34 +58,35 @@ func TestLoadHTTPBytes(t *testing.T) {
func TestYAMLToJSON(t *testing.T) { func TestYAMLToJSON(t *testing.T) {
data := make(map[interface{}]interface{}) sd := `---
data[1] = "the int key value" 1: the int key value
data["name"] = "a string value" name: a string value
data["y"] = "some value" 'y': some value
`
var data yaml.MapSlice
yaml.Unmarshal([]byte(sd), &data)
d, err := YAMLToJSON(data) d, err := YAMLToJSON(data)
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value"}`, string(d)) assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value"}`, string(d))
} }
data[true] = "the bool value" data = append(data, yaml.MapItem{Key: true, Value: "the bool value"})
d, err = YAMLToJSON(data) d, err = YAMLToJSON(data)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, d) assert.Nil(t, d)
delete(data, true) data = data[:len(data)-1]
tag := make(map[interface{}]interface{}) tag := yaml.MapSlice{{Key: "name", Value: "tag name"}}
tag["name"] = "tag name" data = append(data, yaml.MapItem{Key: "tag", Value: tag})
data["tag"] = tag
d, err = YAMLToJSON(data) d, err = YAMLToJSON(data)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, `{"1":"the int key value","name":"a string value","tag":{"name":"tag name"},"y":"some value"}`, string(d)) assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value","tag":{"name":"tag name"}}`, string(d))
tag = make(map[interface{}]interface{}) tag = yaml.MapSlice{{Key: true, Value: "bool tag name"}}
tag[true] = "bool tag name" data = append(data[:len(data)-1], yaml.MapItem{Key: "tag", Value: tag})
data["tag"] = tag
d, err = YAMLToJSON(data) d, err = YAMLToJSON(data)
assert.Error(t, err) assert.Error(t, err)
......
...@@ -22,7 +22,7 @@ To use this software, you must: ...@@ -22,7 +22,7 @@ To use this software, you must:
for details or, if you are using gccgo, follow the instructions at for details or, if you are using gccgo, follow the instructions at
https://golang.org/doc/install/gccgo https://golang.org/doc/install/gccgo
- Grab the code from the repository and install the proto package. - Grab the code from the repository and install the proto package.
The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
The compiler plugin, protoc-gen-go, will be installed in $GOBIN, The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
compiler, protoc, to find it. compiler, protoc, to find it.
...@@ -104,7 +104,6 @@ for a protocol buffer variable v: ...@@ -104,7 +104,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences: When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers. - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method. - Enum types do not get an Enum method.
Consider file test.proto, containing Consider file test.proto, containing
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build go1.7
package proto_test package proto_test
import ( import (
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build go1.7
package proto_test package proto_test
import ( import (
......
...@@ -73,7 +73,6 @@ for a protocol buffer variable v: ...@@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences: When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers. - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method. - Enum types do not get an Enum method.
The simplest way to describe this is to see an example. The simplest way to describe this is to see an example.
......
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	encoded, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	// The type URL is the canonical "type.googleapis.com/" prefix plus the
	// registered message name.
	return &any.Any{
		TypeUrl: googleApis + proto.MessageName(pb),
		Value:   encoded,
	}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// The zero value is ready for use: UnmarshalAny allocates the embedded
// Message itself when it is nil.
//
// Example:
//
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
}
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
import (
"testing"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/golang/protobuf/ptypes/any"
)
func TestMarshalUnmarshal(t *testing.T) {
orig := &any.Any{Value: []byte("test")}
packed, err := MarshalAny(orig)
if err != nil {
t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err)
}
unpacked := &any.Any{}
err = UnmarshalAny(packed, unpacked)
if err != nil || !proto.Equal(unpacked, orig) {
t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig)
}
}
// TestIs checks that Is matches only the message type actually packed.
func TestIs(t *testing.T) {
	packed, err := MarshalAny(&pb.FileDescriptorProto{})
	if err != nil {
		t.Fatal(err)
	}
	if Is(packed, &pb.DescriptorProto{}) {
		t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is")
	}
	if !Is(packed, &pb.FileDescriptorProto{}) {
		t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not")
	}
}
// TestIsDifferentUrlPrefixes verifies that Is compares only the part of
// the type URL after the last '/', so arbitrary URL prefixes still match.
func TestIsDifferentUrlPrefixes(t *testing.T) {
	msg := &pb.FileDescriptorProto{}
	a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(msg)}
	if !Is(a, msg) {
		t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(msg))
	}
}
// TestUnmarshalDynamic verifies that UnmarshalAny allocates the target
// message itself when handed a zero-value *DynamicAny.
func TestUnmarshalDynamic(t *testing.T) {
	want := &pb.FileDescriptorProto{Name: proto.String("foo")}
	packed, err := MarshalAny(want)
	if err != nil {
		t.Fatal(err)
	}

	var got DynamicAny
	if err := UnmarshalAny(packed, &got); err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(got.Message, want) {
		t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want)
	}
}
// TestEmpty verifies that Empty builds a zero message for linked-in types
// and returns an error for type URLs that are not linked into the binary.
func TestEmpty(t *testing.T) {
	want := &pb.FileDescriptorProto{}
	packed, err := MarshalAny(want)
	if err != nil {
		t.Fatal(err)
	}

	got, err := Empty(packed)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(got, want) {
		t.Errorf("unequal empty message, got %q, want %q", got, want)
	}

	// that's a valid type_url for a message which shouldn't be linked into this
	// test binary. We want an error.
	packed.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask"
	if _, err := Empty(packed); err == nil {
		t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", packed.TypeUrl)
	}
}
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
// Bounds on the seconds field of a durpb.Duration.
const (
	// Range of a durpb.Duration in seconds, as specified in
	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
	// Lower bound, symmetric with maxSeconds.
	minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *durpb.Duration) error {
	switch {
	case d == nil:
		return errors.New("duration: nil Duration")
	case d.Seconds < minSeconds || d.Seconds > maxSeconds:
		return fmt.Errorf("duration: %v: seconds out of range", d)
	case d.Nanos <= -1e9 || d.Nanos >= 1e9:
		return fmt.Errorf("duration: %v: nanos out of range", d)
	case (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0):
		// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
	}
	return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
	if err := validateDuration(p); err != nil {
		return 0, err
	}
	d := time.Duration(p.Seconds) * time.Second
	// If the seconds-to-nanoseconds multiplication overflowed int64,
	// dividing back out will not reproduce p.Seconds.
	if int64(d/time.Second) != p.Seconds {
		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
	}
	if p.Nanos != 0 {
		d += time.Duration(p.Nanos)
		// Seconds and Nanos share a sign (enforced by validateDuration), so
		// the addition overflowed iff the result's sign disagrees with Nanos.
		if (d < 0) != (p.Nanos < 0) {
			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
		}
	}
	return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
	// Split the total nanosecond count into whole seconds plus remainder.
	total := d.Nanoseconds()
	secs := total / 1e9
	rem := total - secs*1e9
	return &durpb.Duration{Seconds: secs, Nanos: int32(rem)}
}
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
import (
"math"
"testing"
"time"
"github.com/golang/protobuf/proto"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
	// Bounds on the whole-second component of a time.Duration, which is an
	// int64 count of nanoseconds.
	minGoSeconds = math.MinInt64 / int64(1e9)
	maxGoSeconds = math.MaxInt64 / int64(1e9)
)
// durationTests pairs durpb.Duration values with whether validateDuration
// accepts them (isValid), whether they fit into a time.Duration (inRange),
// and the expected converted value when both hold.
var durationTests = []struct {
	proto   *durpb.Duration
	isValid bool
	inRange bool
	dur     time.Duration
}{
	// The zero duration.
	{&durpb.Duration{0, 0}, true, true, 0},
	// Some ordinary non-zero durations.
	{&durpb.Duration{100, 0}, true, true, 100 * time.Second},
	{&durpb.Duration{-100, 0}, true, true, -100 * time.Second},
	{&durpb.Duration{100, 987}, true, true, 100*time.Second + 987},
	{&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)},
	// The largest duration representable in Go.
	{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64},
	// The smallest duration representable in Go.
	{&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64},
	// Invalid inputs: nil, mismatched signs, seconds out of range.
	{nil, false, false, 0},
	{&durpb.Duration{-100, 987}, false, false, 0},
	{&durpb.Duration{100, -987}, false, false, 0},
	{&durpb.Duration{math.MinInt64, 0}, false, false, 0},
	{&durpb.Duration{math.MaxInt64, 0}, false, false, 0},
	// The largest valid duration.
	{&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0},
	// The smallest valid duration.
	{&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0},
	// The smallest invalid duration above the valid range.
	{&durpb.Duration{maxSeconds + 1, 0}, false, false, 0},
	// The largest invalid duration below the valid range.
	{&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0},
	// One nanosecond past the largest duration representable in Go.
	{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0},
	// One nanosecond past the smallest duration representable in Go.
	{&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0},
	// One second past the largest duration representable in Go.
	{&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0},
	// One second past the smallest duration representable in Go.
	{&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0},
}
// TestValidateDuration runs validateDuration over every table entry.
func TestValidateDuration(t *testing.T) {
	for _, tc := range durationTests {
		valid := validateDuration(tc.proto) == nil
		if valid != tc.isValid {
			t.Errorf("validateDuration(%v) = %t, want %t", tc.proto, valid, tc.isValid)
		}
	}
}
// TestDuration checks Duration's error behavior and converted values
// against the shared table.
func TestDuration(t *testing.T) {
	for _, tc := range durationTests {
		got, err := Duration(tc.proto)
		ok := err == nil
		expectOK := tc.isValid && tc.inRange
		if ok != expectOK {
			t.Errorf("Duration(%v) ok = %t, want %t", tc.proto, ok, expectOK)
		}
		if ok && got != tc.dur {
			t.Errorf("Duration(%v) = %v, want %v", tc.proto, got, tc.dur)
		}
	}
}
// TestDurationProto checks the reverse conversion for every entry that is
// both valid and representable in a time.Duration.
func TestDurationProto(t *testing.T) {
	for _, tc := range durationTests {
		if !(tc.isValid && tc.inRange) {
			continue
		}
		if got := DurationProto(tc.dur); !proto.Equal(got, tc.proto) {
			t.Errorf("DurationProto(%v) = %v, want %v", tc.dur, got, tc.proto)
		}
	}
}
#!/bin/bash -e
#
# This script fetches and rebuilds the "well-known types" protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.
# You also need Go and Git installed.

# Import path of the package whose .proto files are regenerated.
PKG=github.com/golang/protobuf/ptypes
# Upstream repository holding the canonical .proto definitions.
UPSTREAM=https://github.com/google/protobuf
# Subdirectory of the upstream checkout containing the .proto sources.
UPSTREAM_SUBDIR=src/google/protobuf
# NOTE(review): PROTO_FILES is not referenced below — the file set is
# discovered with `find` instead. Presumably kept as documentation; confirm
# before removing.
PROTO_FILES='
any.proto
duration.proto
empty.proto
struct.proto
timestamp.proto
wrappers.proto
'

# die prints its arguments to stderr and exits with failure.
function die() {
  echo 1>&2 $*
  exit 1
}

# Sanity check that the right tools are accessible.
for tool in go git protoc protoc-gen-go; do
  q=$(which $tool) || die "didn't find $tool"
  echo 1>&2 "$tool: $q"
done

# Clone upstream into a throwaway directory, removed on exit.
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
trap 'rm -rf $tmpdir' EXIT

echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
# Strip the import path suffix to recover the workspace source root.
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base

echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir

# Pass 1: build mapping from upstream filename to our filename.
declare -A filename_map
for f in $(cd $PKG && find * -name '*.proto'); do
  echo -n 1>&2 "looking for latest version of $f... "
  # Exactly one upstream file must match each local basename.
  up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
  echo 1>&2 $up
  if [ $(echo $up | wc -w) != "1" ]; then
    die "not exactly one match"
  fi
  filename_map[$up]=$f
done

# Pass 2: copy files
for up in "${!filename_map[@]}"; do
  f=${filename_map[$up]}
  # NOTE(review): shortname is computed but never used below.
  shortname=$(basename $f | sed 's,\.proto$,,')
  cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
done

# Run protoc once per package.
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
  echo 1>&2 "* $dir"
  protoc --go_out=. $dir/*.proto
done
echo 1>&2 "All OK"
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
// Valid Timestamp seconds range: [0001-01-01, 10000-01-01).
const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	minValidSeconds = -62135596800
	// Seconds field just after the latest valid Timestamp.
	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	switch {
	case ts.Seconds < minValidSeconds:
		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
	case ts.Seconds >= maxValidSeconds:
		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
	case ts.Nanos < 0 || ts.Nanos >= 1e9:
		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	// A nil input is treated like the empty Timestamp (zero seconds/nanos).
	var sec, nsec int64
	if ts != nil {
		sec, nsec = ts.Seconds, int64(ts.Nanos)
	}
	return time.Unix(sec, nsec).UTC(), validateTimestamp(ts)
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
	secs := t.Unix()
	// Nanoseconds past the whole second, derived from the difference
	// between t and its truncated Unix time.
	ts := &tspb.Timestamp{
		Seconds: secs,
		Nanos:   int32(t.Sub(time.Unix(secs, 0))),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
	t, err := Timestamp(ts)
	if err == nil {
		return t.Format(time.RFC3339Nano)
	}
	return fmt.Sprintf("(%v)", err)
}
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
import (
"math"
"testing"
"time"
"github.com/golang/protobuf/proto"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
// tests pairs Timestamp protos with whether they are valid and the
// time.Time each converts to. The t field is meaningful even for some
// invalid inputs, because Timestamp returns time.Unix's result on error.
var tests = []struct {
	ts    *tspb.Timestamp
	valid bool
	t     time.Time
}{
	// The timestamp representing the Unix epoch date.
	{&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)},
	// The smallest representable timestamp.
	{&tspb.Timestamp{math.MinInt64, math.MinInt32}, false,
		time.Unix(math.MinInt64, math.MinInt32).UTC()},
	// The smallest representable timestamp with non-negative nanos.
	{&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()},
	// The earliest valid timestamp.
	{&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)},
	//"0001-01-01T00:00:00Z"},
	// The largest representable timestamp.
	{&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false,
		time.Unix(math.MaxInt64, math.MaxInt32).UTC()},
	// The largest representable timestamp with nanos in range.
	{&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false,
		time.Unix(math.MaxInt64, 1e9-1).UTC()},
	// The largest valid timestamp.
	{&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true,
		time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)},
	// The smallest invalid timestamp that is larger than the valid range.
	{&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()},
	// A date before the epoch.
	{&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)},
	// A date after the epoch.
	{&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)},
	// A date after the epoch, in the middle of the day.
	{&tspb.Timestamp{1296012345, 940483}, true,
		time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)},
}
// TestValidateTimestamp runs validateTimestamp over the shared test table.
func TestValidateTimestamp(t *testing.T) {
	for _, tc := range tests {
		err := validateTimestamp(tc.ts)
		if (err == nil) != tc.valid {
			t.Errorf("validateTimestamp(%v) = %v, want %v", tc.ts, err, tc.valid)
		}
	}
}
// TestTimestamp checks proto-to-time conversion for every table entry,
// plus the nil special case.
func TestTimestamp(t *testing.T) {
	for _, tc := range tests {
		got, err := Timestamp(tc.ts)
		switch {
		case (err == nil) != tc.valid:
			t.Errorf("Timestamp(%v) error = %v, but valid = %t", tc.ts, err, tc.valid)
		case tc.valid && got != tc.t:
			t.Errorf("Timestamp(%v) = %v, want %v", tc.ts, got, tc.t)
		}
	}
	// Special case: a nil Timestamp is an error, but returns the 0 Unix time.
	got, err := Timestamp(nil)
	if want := time.Unix(0, 0).UTC(); got != want {
		t.Errorf("Timestamp(nil) = %v, want %v", got, want)
	}
	if err == nil {
		t.Errorf("Timestamp(nil) error = nil, expected error")
	}
}
// TestTimestampProto checks time-to-proto conversion for every table entry.
func TestTimestampProto(t *testing.T) {
	for _, tc := range tests {
		got, err := TimestampProto(tc.t)
		switch {
		case (err == nil) != tc.valid:
			t.Errorf("TimestampProto(%v) error = %v, but valid = %t", tc.t, err, tc.valid)
		case tc.valid && !proto.Equal(got, tc.ts):
			t.Errorf("TimestampProto(%v) = %v, want %v", tc.t, got, tc.ts)
		}
	}
	// No corresponding special case here: no time.Time results in a nil Timestamp.
}
// TestTimestampString spot-checks formatting of one valid and one invalid
// Timestamp.
func TestTimestampString(t *testing.T) {
	cases := []struct {
		ts   *tspb.Timestamp
		want string
	}{
		// Not much testing needed because presumably time.Format is
		// well-tested.
		{&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"},
		{&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"},
	}
	for _, tc := range cases {
		if got := TimestampString(tc.ts); got != tc.want {
			t.Errorf("TimestampString(%v) = %q, want %q", tc.ts, got, tc.want)
		}
	}
}
func utcDate(year, month, day int) time.Time {
return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
...@@ -50,7 +50,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) ...@@ -50,7 +50,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option)
gRPCComponentTag, gRPCComponentTag,
) )
defer clientSpan.Finish() defer clientSpan.Finish()
md, ok := metadata.FromContext(ctx) md, ok := metadata.FromOutgoingContext(ctx)
if !ok { if !ok {
md = metadata.New(nil) md = metadata.New(nil)
} else { } else {
...@@ -62,7 +62,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) ...@@ -62,7 +62,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option)
if err != nil { if err != nil {
clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err))
} }
ctx = metadata.NewContext(ctx, md) ctx = metadata.NewOutgoingContext(ctx, md)
if otgrpcOpts.logPayloads { if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC request", req)) clientSpan.LogFields(log.Object("gRPC request", req))
} }
...@@ -72,8 +72,8 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) ...@@ -72,8 +72,8 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option)
clientSpan.LogFields(log.Object("gRPC response", resp)) clientSpan.LogFields(log.Object("gRPC response", resp))
} }
} else { } else {
clientSpan.LogFields(log.String("event", "gRPC error"), log.Error(err)) SetSpanTags(clientSpan, err, true)
ext.Error.Set(clientSpan, true) clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
} }
if otgrpcOpts.decorator != nil { if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, req, resp, err) otgrpcOpts.decorator(clientSpan, method, req, resp, err)
......
package otgrpc
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/opentracing/opentracing-go/mocktracer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// firstCode and lastCode bound the range of gRPC status codes that
// TestSpanTags iterates over with code++ (so all codes.Code values between
// OK and DataLoss are exercised).
const (
	firstCode = codes.OK
	lastCode  = codes.DataLoss
)
// TestSpanTags verifies the tags SetSpanTags attaches for every gRPC status
// code, from both the client side and the server side.
func TestSpanTags(t *testing.T) {
	tracer := mocktracer.New()
	for code := firstCode; code <= lastCode; code++ {
		for _, side := range []struct {
			op     string
			client bool
		}{
			{"test-trace-client", true},
			{"test-trace-server", false},
		} {
			tracer.Reset()
			span := tracer.StartSpan(side.op)
			err := status.Error(code, "")
			SetSpanTags(span, err, side.client)
			span.Finish()

			// Assert added tags
			raw := tracer.FinishedSpans()[0]
			expected := map[string]interface{}{
				"response_code":  code,
				"response_class": ErrorClass(err),
			}
			// Clients flag every error; servers only flag server-class errors.
			if err != nil && (side.client || ErrorClass(err) == ServerError) {
				expected["error"] = true
			}
			assert.Equal(t, expected, raw.Tags())
		}
	}
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment