diff --git a/.gitignore b/.gitignore
index 7eb9283..25d1ba3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@ _testmain.go
*.exe
*.test
*.prof
+.idea
diff --git a/Dockerfile b/Dockerfile
index f5ecbf9..648a009 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
-FROM golang:1.8 AS buildimage
+FROM golang:1.15.8-alpine3.13 AS buildimage
-RUN apt-get update && apt-get install -y ca-certificates
+RUN apk update && apk add ca-certificates bash
COPY . /go/src/github.com/Codigami/gohaqd/
WORKDIR /go/src/github.com/Codigami/gohaqd
RUN CGO_ENABLED=0 GOOS=linux /bin/bash -c "bash check.sh && go build -a -v -ldflags '-w'"
diff --git a/README.md b/README.md
index a0dcdf4..209a8a4 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@ It pulls data off a queue, inserts it into the message body, and sends an HTTP P
## Flags:
```
--aws-region string AWS Region for the SQS queue (default "us-east-1")
+ --aws-account-id string AWS Account ID for the SQS queue (default "")
-c, --config string config file path (default "./gohaqd.yaml")
--healthcheck-url string HTTP endpoint for checking if consumer server is up
-h, --help help for gohaqd
@@ -31,6 +32,7 @@ It pulls data off a queue, inserts it into the message body, and sends an HTTP P
queues:
- name: test
url: http://localhost:8080/consume
+ awsAccountId: 23494933939
- name: test2
- name: test3
parallel: 3
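
For reference, the new `awsAccountId` key in this example maps onto the `AwsAccountID` field added to the `Queue` struct in the cmd/root.go hunk further down. A minimal, self-contained sketch of that mapping, assuming yaml.v2's default field-name lowercasing and a stand-in `config` type that only holds `Queues`:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// queue mirrors the Queue type from cmd/root.go: only awsAccountId needs an
// explicit tag; the other fields rely on yaml.v2's default lowercasing.
type queue struct {
	Name         string
	AwsAccountID string `yaml:"awsAccountId"`
	URL          string
	Parallel     int
}

// config is an assumed stand-in for the Config type, holding only Queues.
type config struct {
	Queues []queue
}

func main() {
	raw := []byte(`
queues:
  - name: test
    url: http://localhost:8080/consume
    awsAccountId: "23494933939"
  - name: test2
  - name: test3
    parallel: 3
`)

	var c config
	if err := yaml.Unmarshal(raw, &c); err != nil {
		log.Fatalf("parsing config: %v", err)
	}
	for _, q := range c.Queues {
		fmt.Printf("queue=%q owner=%q url=%q parallel=%d\n",
			q.Name, q.AwsAccountID, q.URL, q.Parallel)
	}
}
```
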
diff --git a/check.sh b/check.sh
index 49027ad..7f46473 100644
--- a/check.sh
+++ b/check.sh
@@ -5,10 +5,10 @@ set -x
PACKAGE_LIST=$(go list ./... | grep -v /vendor/)
-go get -u -v github.com/golang/lint/golint
+go get -u -v golang.org/x/lint/golint
go fmt $PACKAGE_LIST
go vet -v $PACKAGE_LIST
-echo "$PACKAGE_LIST" | xargs -L1 golint -set_exit_status
+echo "$PACKAGE_LIST" | xargs -I1 golint -set_exit_status
go test $PACKAGE_LIST
diff --git a/cmd/root.go b/cmd/root.go
index 51a4403..3147654 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -40,11 +40,12 @@ import (
// Queue holds information about each queue from where the messages are consumed
type Queue struct {
- Name string
- URL string
- Parallel int
- sem chan *sqs.Message
- msgparams *sqs.ReceiveMessageInput
+ Name string
+ AwsAccountID string `yaml:"awsAccountId"`
+ URL string
+ Parallel int
+ sem chan *sqs.Message
+ msgparams *sqs.ReceiveMessageInput
}
// Config stores the parsed yaml config file
@@ -62,6 +63,7 @@ var parallelRequests int
var svc *sqs.SQS
var httpClient *http.Client
var port int
+var awsAccountID string
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
@@ -87,6 +89,7 @@ func init() {
RootCmd.PersistentFlags().StringVarP(&url, "url", "u", "", "HTTP endpoint. Takes the URL from the message by default. (Used only when --config is not set and default config doesn't exist)")
RootCmd.PersistentFlags().StringVar(&healthcheckURL, "healthcheck-url", "", "HTTP endpoint for checking if consumer server is up")
+ RootCmd.PersistentFlags().StringVar(&awsAccountID, "aws-account-id", "", "AWS Account ID for the SQS queue")
RootCmd.PersistentFlags().StringVar(&awsRegion, "aws-region", "us-east-1", "AWS Region for the SQS queue")
RootCmd.PersistentFlags().StringVar(&sqsEndpoint, "sqs-endpoint", "", "SQS Endpoint for using with fake_sqs")
RootCmd.PersistentFlags().IntVar(&parallelRequests, "parallel", 1, "Number of messages to be consumed in parallel")
@@ -104,9 +107,10 @@ func startGohaqd(cmd *cobra.Command, args []string) {
log.Println("config file doesn't exist so using queueName from flag")
config.Queues = append(config.Queues, Queue{
- Name: queueName,
- URL: url,
- Parallel: parallelRequests,
+ AwsAccountID: awsAccountID,
+ Name: queueName,
+ URL: url,
+ Parallel: parallelRequests,
})
} else {
os.Exit(1)
@@ -125,7 +129,10 @@ func startGohaqd(cmd *cobra.Command, args []string) {
} else {
awsConfig = aws.NewConfig().WithRegion(awsRegion)
}
- sess := session.New(awsConfig)
+ sess, err := session.NewSession(awsConfig)
+ if err != nil {
+ log.Fatalln("Error while creating AWS session: " + err.Error())
+ }
svc = sqs.New(sess)
if healthcheckURL != "" {
@@ -156,6 +163,9 @@ func initializeQueue(queue Queue) {
qparams := &sqs.GetQueueUrlInput{
QueueName: aws.String(queue.Name),
}
+ if queue.AwsAccountID != "" {
+ qparams.QueueOwnerAWSAccountId = aws.String(queue.AwsAccountID)
+ }
q, err := svc.GetQueueUrl(qparams)
if err != nil {
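
Taken together, these root.go changes swap the deprecated `session.New` for `session.NewSession` (which returns configuration errors instead of deferring them to the first API call) and forward the per-queue account ID to SQS as `QueueOwnerAWSAccountId`, which is what makes queues owned by another AWS account resolvable. A minimal sketch of the resulting call path against the aws-sdk-go v1 API; the region, queue name, and account ID below are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// session.NewSession surfaces configuration errors up front, which is
	// why the diff wraps it in a log.Fatalln instead of using session.New.
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatalln("Error while creating AWS session: " + err.Error())
	}
	svc := sqs.New(sess)

	// QueueOwnerAWSAccountId is only set when an account ID is configured,
	// mirroring the conditional added in initializeQueue.
	qparams := &sqs.GetQueueUrlInput{
		QueueName: aws.String("test"),
	}
	if accountID := "123456789012"; accountID != "" {
		qparams.QueueOwnerAWSAccountId = aws.String(accountID)
	}

	q, err := svc.GetQueueUrl(qparams)
	if err != nil {
		log.Fatalln("Error getting queue URL: " + err.Error())
	}
	log.Println("resolved queue URL:", *q.QueueUrl)
}
```
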
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..3b5ee9f
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,22 @@
+module github.com/Codigami/gohaqd
+
+go 1.15
+
+require (
+ github.com/aws/aws-sdk-go v1.37.5
+ github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
+ github.com/go-ini/ini v1.21.2-0.20161120031036-2ba15ac2dc9c // indirect
+ github.com/golang/protobuf v0.0.0-20160106020635-2402d76f3d41 // indirect
+ github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+ github.com/prometheus/client_golang v0.8.0
+ github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect
+ github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082 // indirect
+ github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 // indirect
+ github.com/smartystreets/goconvey v1.6.4 // indirect
+ github.com/spf13/cobra v0.0.0-20170823073209-2df9a5318133
+ github.com/spf13/pflag v1.0.0 // indirect
+ github.com/stretchr/testify v1.7.0 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+ gopkg.in/yaml.v2 v2.2.8
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..2da922b
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,72 @@
+github.com/aws/aws-sdk-go v1.4.22 h1:7cE0XlB+5lcGE2R6FBEfZqOPka+rXVcXHPeRS3VEu34=
+github.com/aws/aws-sdk-go v1.4.22/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
+github.com/aws/aws-sdk-go v1.37.5 h1:9zJ1aXRk1gLSWEeaMXa7Hbv1pIM915T2tpaIJi0+mkA=
+github.com/aws/aws-sdk-go v1.37.5/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-ini/ini v1.21.2-0.20161120031036-2ba15ac2dc9c h1:sgsTs+gMEkMAK/Hbf5Ut/PYCP2MTjDBrnvShpq7yFHo=
+github.com/go-ini/ini v1.21.2-0.20161120031036-2ba15ac2dc9c/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/golang/protobuf v0.0.0-20160106020635-2402d76f3d41 h1:BIDtr9YECqqvixqxNnfN1Dp4dlRZB2nS68tywI+YZj4=
+github.com/golang/protobuf v0.0.0-20160106020635-2402d76f3d41/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g=
+github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082 h1:M/45ksQhBkhxI65UXRNvyuF6sV7A08GMYk39aGZQlJQ=
+github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 h1:Kh7M6mzRpQ2de1rixoSQZr4BTINXFm8WDbeN5ttnwyE=
+github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spf13/cobra v0.0.0-20170823073209-2df9a5318133 h1:k5T0NKT0QzMzEaqufAzrZjPlFdCuPwXlngPZlLehiJU=
+github.com/spf13/cobra v0.0.0-20170823073209-2df9a5318133/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.0 h1:oaPbdDe/x0UncahuwiPxW1GYJyilRAdsPnq3e1yaPcI=
+github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79 h1:mENkfeXGmLV7lIyBeNdwYWdONek7pH9yHaHMgZyvIWE=
+gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/aws/aws-sdk-go/.gitignore b/vendor/github.com/aws/aws-sdk-go/.gitignore
deleted file mode 100644
index fb11cec..0000000
--- a/vendor/github.com/aws/aws-sdk-go/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-dist
-/doc
-/doc-staging
-.yardoc
-Gemfile.lock
-awstesting/integration/smoke/**/importmarker__.go
-awstesting/integration/smoke/_test/
-/vendor/bin/
-/vendor/pkg/
-/vendor/src/
-/private/model/cli/gen-api/gen-api
diff --git a/vendor/github.com/aws/aws-sdk-go/.godoc_config b/vendor/github.com/aws/aws-sdk-go/.godoc_config
deleted file mode 100644
index 395878d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/.godoc_config
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "PkgHandler": {
- "Pattern": "/sdk-for-go/api/",
- "StripPrefix": "/sdk-for-go/api",
- "Include": ["/src/github.com/aws/aws-sdk-go/aws", "/src/github.com/aws/aws-sdk-go/service"],
- "Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate"],
- "IgnoredSuffixes": ["iface"]
- },
- "Github": {
- "Tag": "master",
- "Repo": "/aws/aws-sdk-go",
- "UseGithub": true
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/.travis.yml b/vendor/github.com/aws/aws-sdk-go/.travis.yml
deleted file mode 100644
index b76546d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-sudo: false
-
-go:
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - tip
-
-# Use Go 1.5's vendoring experiment for 1.5 tests. 1.4 tests will use the tip of the dependencies repo.
-env:
- - GO15VENDOREXPERIMENT=1
-
-install:
- - make get-deps
-
-script:
- - make unit-with-race-cover
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/aws/aws-sdk-go/.yardopts b/vendor/github.com/aws/aws-sdk-go/.yardopts
deleted file mode 100644
index 07724e4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/.yardopts
+++ /dev/null
@@ -1,7 +0,0 @@
---plugin go
--e doc-src/plugin/plugin.rb
--m markdown
--o doc/api
---title "AWS SDK for Go"
-aws/**/*.go
-service/**/*.go
diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
deleted file mode 100644
index 38ba675..0000000
--- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
+++ /dev/null
@@ -1,124 +0,0 @@
-Release v1.4.22 (2016-10-25)
-===
-
-Service Client Updates
----
-* `service/elasticloadbalancingv2`: Updates service documentation.
-* `service/autoscaling`: Updates service documentation.
-
-Release v1.4.21 (2016-10-24)
-===
-
-Service Client Updates
----
-* `service/sms`: AWS Server Migration Service (SMS) is an agentless service which makes it easier and faster for you to migrate thousands of on-premises workloads to AWS. AWS SMS allows you to automate, schedule, and track incremental replications of live server volumes, making it easier for you to coordinate large-scale server migrations.
-* `service/ecs`: Updates documentation.
-
-SDK Feature Updates
----
-* `private/models/api`: Improve code generation of documentation.
-
-Release v1.4.20 (2016-10-20)
-===
-
-Service Client Updates
----
-* `service/budgets`: Adds new service, AWS Budgets.
-* `service/waf`: Updates service documentation.
-
-Release v1.4.19 (2016-10-18)
-===
-
-Service Client Updates
----
-* `service/cloudfront`: Updates service API and documentation.
- * Ability to use Amazon CloudFront to deliver your content both via IPv6 and IPv4 using HTTP/HTTPS.
-* `service/configservice`: Update service API and documentation.
-* `service/iot`: Updates service API and documentation.
-* `service/kinesisanalytics`: Updates service API and documentation.
- * Whenever Amazon Kinesis Analytics is not able to detect schema for the given streaming source on DiscoverInputSchema API, we would return the raw records that was sampled to detect the schema.
-* `service/rds`: Updates service API and documentation.
- * Amazon Aurora integrates with other AWS services to allow you to extend your Aurora DB cluster to utilize other capabilities in the AWS cloud. Permission to access other AWS services is granted by creating an IAM role with the necessary permissions, and then associating the role with your DB cluster.
-
-SDK Feature Updates
----
-* `service/dynamodb/dynamodbattribute`: Add UnmarshalListOfMaps #897
- * Adds support for unmarshalling a list of maps. This is useful for unmarshalling the DynamoDB AttributeValue list of maps returned by APIs like Query and Scan.
-
-Release v1.4.18 (2016-10-17)
-===
-
-Service Model Updates
----
-* `service/route53`: Updates service API and documentation.
-
-Release v1.4.17
-===
-
-Service Model Updates
----
-* `service/acm`: Update service API, and documentation.
- * This change allows users to import third-party SSL/TLS certificates into ACM.
-* `service/elasticbeanstalk`: Update service API, documentation, and pagination.
- * Elastic Beanstalk DescribeApplicationVersions API is being updated to support pagination.
-* `service/gamelift`: Update service API, and documentation.
- * New APIs to protect game developer resource (builds, alias, fleets, instances, game sessions and player sessions) against abuse.
-
-SDK Features
----
-* `service/s3`: Add support for accelerate with dualstack [#887](https://github.com/aws/aws-sdk-go/issues/887)
-
-Release v1.4.16 (2016-10-13)
-===
-
-Service Model Updates
----
-* `service/ecr`: Update Amazon EC2 Container Registry service model
- * DescribeImages is a new api used to expose image metadata which today includes image size and image creation timestamp.
-* `service/elasticache`: Update Amazon ElastiCache service model
- * Elasticache is launching a new major engine release of Redis, 3.2 (providing stability updates and new command sets over 2.8), as well as ElasticSupport for enabling Redis Cluster in 3.2, which provides support for multiple node groups to horizontally scale data, as well as superior engine failover capabilities
-
-SDK Bug Fixes
----
-* `aws/session`: Skip shared config on read errors [#883](https://github.com/aws/aws-sdk-go/issues/883)
-* `aws/signer/v4`: Add support for URL.EscapedPath to signer [#885](https://github.com/aws/aws-sdk-go/issues/885)
-
-SDK Features
----
-* `private/model/api`: Add docs for errors to API operations [#881](https://github.com/aws/aws-sdk-go/issues/881)
-* `private/model/api`: Improve field and waiter doc strings [#879](https://github.com/aws/aws-sdk-go/issues/879)
-* `service/dynamodb/dynamodbattribute`: Allow multiple struct tag elements [#886](https://github.com/aws/aws-sdk-go/issues/886)
-* Add build tags to internal SDK tools [#880](https://github.com/aws/aws-sdk-go/issues/880)
-
-Release v1.4.15 (2016-10-06)
-===
-
-Service Model Updates
----
-* `service/cognitoidentityprovider`: Update Amazon Cognito Identity Provider service model
-* `service/devicefarm`: Update AWS Device Farm documentation
-* `service/opsworks`: Update AWS OpsWorks service model
-* `service/s3`: Update Amazon Simple Storage Service model
-* `service/waf`: Update AWS WAF service model
-
-SDK Bug Fixes
----
-* `aws/request`: Fix HTTP Request Body race condition [#874](https://github.com/aws/aws-sdk-go/issues/874)
-
-SDK Feature Updates
----
-* `aws/ec2metadata`: Add support for EC2 User Data [#872](https://github.com/aws/aws-sdk-go/issues/872)
-* `aws/signer/v4`: Remove logic determining if request needs to be resigned [#876](https://github.com/aws/aws-sdk-go/issues/876)
-
-Release v1.4.14 (2016-09-29)
-===
-* `service/ec2`: api, documentation, and paginators updates.
-* `service/s3`: api and documentation updates.
-
-Release v1.4.13 (2016-09-27)
-===
-* `service/codepipeline`: documentation updates.
-* `service/cloudformation`: api and documentation updates.
-* `service/kms`: documentation updates.
-* `service/elasticfilesystem`: documentation updates.
-* `service/snowball`: documentation updates.
diff --git a/vendor/github.com/aws/aws-sdk-go/Gemfile b/vendor/github.com/aws/aws-sdk-go/Gemfile
deleted file mode 100644
index 2fb295a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/Gemfile
+++ /dev/null
@@ -1,6 +0,0 @@
-source 'https://rubygems.org'
-
-gem 'yard', git: 'git://github.com/lsegal/yard', ref: '5025564a491e1b7c6192632cba2802202ca08449'
-gem 'yard-go', git: 'git://github.com/jasdel/yard-go', ref: 'e78e1ef7cdf5e0f3266845b26bb4fd64f1dd6f85'
-gem 'rdiscount'
-
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/Makefile b/vendor/github.com/aws/aws-sdk-go/Makefile
deleted file mode 100644
index 141ace5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/Makefile
+++ /dev/null
@@ -1,157 +0,0 @@
-LINTIGNOREDOT='awstesting/integration.+should not use dot imports'
-LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)'
-LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)'
-LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)'
-LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be '
-LINTIGNOREINFLECTS3UPLOAD='service/s3/s3manager/upload\.go:.+struct field SSEKMSKeyId should be '
-LINTIGNOREDEPS='vendor/.+\.go'
-UNIT_TEST_TAGS="example codegen"
-
-SDK_WITH_VENDOR_PKGS=$(shell go list -tags ${UNIT_TEST_TAGS} ./... | grep -v "/vendor/src")
-SDK_ONLY_PKGS=$(shell go list ./... | grep -v "/vendor/")
-SDK_UNIT_TEST_ONLY_PKGS=$(shell go list -tags ${UNIT_TEST_TAGS} ./... | grep -v "/vendor/")
-SDK_GO_1_4=$(shell go version | grep "go1.4")
-SDK_GO_1_5=$(shell go version | grep "go1.5")
-SDK_GO_VERSION=$(shell go version | awk '''{print $$3}''' | tr -d '''\n''')
-
-all: get-deps generate unit
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " api_info to print a list of services and versions"
- @echo " docs to build SDK documentation"
- @echo " build to go build the SDK"
- @echo " unit to run unit tests"
- @echo " integration to run integration tests"
- @echo " performance to run performance tests"
- @echo " verify to verify tests"
- @echo " lint to lint the SDK"
- @echo " vet to vet the SDK"
- @echo " generate to go generate and make services"
- @echo " gen-test to generate protocol tests"
- @echo " gen-services to generate services"
- @echo " get-deps to go get the SDK dependencies"
- @echo " get-deps-tests to get the SDK's test dependencies"
- @echo " get-deps-verify to get the SDK's verification dependencies"
-
-generate: gen-test gen-endpoints gen-services
-
-gen-test: gen-protocol-test
-
-gen-services:
- go generate ./service
-
-gen-protocol-test:
- go generate ./private/protocol/...
-
-gen-endpoints:
- go generate ./private/endpoints
-
-build:
- @echo "go build SDK and vendor packages"
- @go build ${SDK_ONLY_PKGS}
-
-unit: get-deps-tests build verify
- @echo "go test SDK and vendor packages"
- @go test -tags ${UNIT_TEST_TAGS} $(SDK_UNIT_TEST_ONLY_PKGS)
-
-unit-with-race-cover: get-deps-tests build verify
- @echo "go test SDK and vendor packages"
- @go test -tags ${UNIT_TEST_TAGS} -race -cpu=1,2,4 $(SDK_UNIT_TEST_ONLY_PKGS)
-
-integration: get-deps-tests integ-custom smoke-tests performance
-
-integ-custom:
- go test -tags "integration" ./awstesting/integration/customizations/...
-
-smoke-tests: get-deps-tests
- gucumber -go-tags "integration" ./awstesting/integration/smoke
-
-performance: get-deps-tests
- AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber -go-tags "integration" ./awstesting/performance
-
-sandbox-tests: sandbox-test-go14 sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-gotip
-
-sandbox-test-go14:
- docker build -f ./awstesting/sandbox/Dockerfile.test.go1.4 -t "aws-sdk-go-1.4" .
- docker run -t aws-sdk-go-1.4
-
-sandbox-test-go15:
- docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" .
- docker run -t aws-sdk-go-1.5
-
-sandbox-test-go15-novendorexp:
- docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5-novendorexp -t "aws-sdk-go-1.5-novendorexp" .
- docker run -t aws-sdk-go-1.5-novendorexp
-
-sandbox-test-go16:
- docker build -f ./awstesting/sandbox/Dockerfile.test.go1.6 -t "aws-sdk-go-1.6" .
- docker run -t aws-sdk-go-1.6
-
-sandbox-test-go17:
- docker build -f ./awstesting/sandbox/Dockerfile.test.go1.7 -t "aws-sdk-go-1.7" .
- docker run -t aws-sdk-go-1.7
-
-sandbox-test-gotip:
- @echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container"
- docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" .
- docker run -t aws-sdk-go-tip
-
-update-aws-golang-tip:
- docker build -f ./awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" .
-
-verify: get-deps-verify lint vet
-
-lint:
- @echo "go lint SDK and vendor packages"
- @lint=`if [ \( -z "${SDK_GO_1_4}" \) -a \( -z "${SDK_GO_1_5}" \) ]; then golint ./...; else echo "skipping golint"; fi`; \
- lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS} -e ${LINTIGNOREINFLECTS3UPLOAD}`; \
- echo "$$lint"; \
- if [ "$$lint" != "" ] && [ "$$lint" != "skipping golint" ]; then exit 1; fi
-
-SDK_BASE_FOLDERS=$(shell ls -d */ | grep -v vendor | grep -v awsmigrate)
-ifneq (,$(findstring go1.4, ${SDK_GO_VERSION}))
- GO_VET_CMD=echo skipping go vet, ${SDK_GO_VERSION}
-else ifneq (,$(findstring go1.6, ${SDK_GO_VERSION}))
- GO_VET_CMD=go tool vet --all -shadow -example=false
-else
- GO_VET_CMD=go tool vet --all -shadow
-endif
-
-vet:
- ${GO_VET_CMD} ${SDK_BASE_FOLDERS}
-
-get-deps: get-deps-tests get-deps-verify
- @echo "go get SDK dependencies"
- @go get -v $(SDK_ONLY_PKGS)
-
-get-deps-tests:
- @echo "go get SDK testing dependencies"
- go get github.com/gucumber/gucumber/cmd/gucumber
- go get github.com/stretchr/testify
- go get github.com/smartystreets/goconvey
- go get golang.org/x/net/html
-
-get-deps-verify:
- @echo "go get SDK verification utilities"
- @if [ \( -z "${SDK_GO_1_4}" \) -a \( -z "${SDK_GO_1_5}" \) ]; then go get github.com/golang/lint/golint; else echo "skipped getting golint"; fi
-
-bench:
- @echo "go bench SDK packages"
- @go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS)
-
-bench-protocol:
- @echo "go bench SDK protocol marshallers"
- @go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/...
-
-docs:
- @echo "generate SDK docs"
- @# This env variable, DOCS, is for internal use
- @if [ -z ${AWS_DOC_GEN_TOOL} ]; then\
- rm -rf doc && bundle install && bundle exec yard;\
- else\
- $(AWS_DOC_GEN_TOOL) `pwd`;\
- fi
-
-api_info:
- @go run private/model/cli/api-info/api-info.go
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
deleted file mode 100644
index 5f14d11..0000000
--- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-AWS SDK for Go
-Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md
deleted file mode 100644
index 947643a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# AWS SDK for Go
-
-
-[](http://docs.aws.amazon.com/sdk-for-go/api)
-[](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-[](https://travis-ci.org/aws/aws-sdk-go)
-[](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
-
-
-aws-sdk-go is the official AWS SDK for the Go programming language.
-
-Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
-
-## Installing
-
-If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder.
-
- go get -u github.com/aws/aws-sdk-go
-
-Otherwise if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK's dependencies you can use the following command to retrieve the SDK and its non-testing dependencies using `go get`.
-
- go get -u github.com/aws/aws-sdk-go/aws/...
- go get -u github.com/aws/aws-sdk-go/service/...
-
-If you're looking to retrieve just the SDK without any dependencies use the following command.
-
- go get -d github.com/aws/aws-sdk-go/
-
-These two processes will still include the `vendor` folder and it should be deleted if its not going to be used by your environment.
-
- rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor
-
-## Reference Documentation
-[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction how to configure and make requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services.
-
-[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples how to using the SDK, service client API operations, and API operation require parameters.
-
-[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These are great guides both, if you're getting started with a service, or looking for more information on a service. You should not need this document for coding, though in some cases, services may supply helpful samples that you might want to look out for.
-
-[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are a several hand crafted examples using the SDK features and AWS services.
-
-## Configuring Credentials
-
-Before using the SDK, ensure that you've configured credentials. The best
-way to configure credentials on a development machine is to use the
-`~/.aws/credentials` file, which might look like:
-
-```
-[default]
-aws_access_key_id = AKID1234567890
-aws_secret_access_key = MY-SECRET-KEY
-```
-
-You can learn more about the credentials file from this
-[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs).
-
-Alternatively, you can set the following environment variables:
-
-```
-AWS_ACCESS_KEY_ID=AKID1234567890
-AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY
-```
-
-### AWS shared config file (`~/.aws/config`)
-The AWS SDK for Go added support the shared config file in release [v1.3.0](https://github.com/aws/aws-sdk-go/releases/tag/v1.3.0). You can opt into enabling support for the shared config by setting the environment variable `AWS_SDK_LOAD_CONFIG` to a truthy value. See the [Session](https://github.com/aws/aws-sdk-go/wiki/sessions) wiki for more information about this feature.
-
-## Using the Go SDK
-
-To use a service in the SDK, create a service variable by calling the `New()`
-function. Once you have a service client, you can call API operations which each
-return response data and a possible error.
-
-To list a set of instance IDs from EC2, you could run:
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
-)
-
-func main() {
- // Create an EC2 service object in the "us-west-2" region
- // Note that you can also configure your region globally by
- // exporting the AWS_REGION environment variable
- svc := ec2.New(session.NewSession(), &aws.Config{Region: aws.String("us-west-2")})
-
- // Call the DescribeInstances Operation
- resp, err := svc.DescribeInstances(nil)
- if err != nil {
- panic(err)
- }
-
- // resp has all of the response data, pull out instance IDs:
- fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
- for idx, res := range resp.Reservations {
- fmt.Println(" > Number of instances: ", len(res.Instances))
- for _, inst := range resp.Reservations[idx].Instances {
- fmt.Println(" - Instance ID: ", *inst.InstanceId)
- }
- }
-}
-```
-
-You can find more information and operations in our
-[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/).
-
-## License
-
-This SDK is distributed under the
-[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
-see LICENSE.txt and NOTICE.txt for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
deleted file mode 100644
index 56fdfc2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Package awserr represents API error interface accessors for the SDK.
-package awserr
-
-// An Error wraps lower level errors with code, message and an original error.
-// The underlying concrete error type may also satisfy other interfaces which
-// can be to used to obtain more specific information about the error.
-//
-// Calling Error() or String() will always include the full information about
-// an error based on its underlying type.
-//
-// Example:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if awsErr, ok := err.(awserr.Error); ok {
-// // Get error details
-// log.Println("Error:", awsErr.Code(), awsErr.Message())
-//
-// // Prints out full error message, including original error if there was one.
-// log.Println("Error:", awsErr.Error())
-//
-// // Get original error
-// if origErr := awsErr.OrigErr(); origErr != nil {
-// // operate on original error.
-// }
-// } else {
-// fmt.Println(err.Error())
-// }
-// }
-//
-type Error interface {
- // Satisfy the generic error interface.
- error
-
- // Returns the short phrase depicting the classification of the error.
- Code() string
-
- // Returns the error details message.
- Message() string
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErr() error
-}
-
-// BatchError is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Deprecated: Replaced with BatchedErrors. Only defined for backwards
-// compatibility.
-type BatchError interface {
- // Satisfy the generic error interface.
- error
-
- // Returns the short phrase depicting the classification of the error.
- Code() string
-
- // Returns the error details message.
- Message() string
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErrs() []error
-}
-
-// BatchedErrors is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Replaces BatchError
-type BatchedErrors interface {
- // Satisfy the base Error interface.
- Error
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErrs() []error
-}
-
-// New returns an Error object described by the code, message, and origErr.
-//
-// If origErr satisfies the Error interface it will not be wrapped within a new
-// Error object and will instead be returned.
-func New(code, message string, origErr error) Error {
- var errs []error
- if origErr != nil {
- errs = append(errs, origErr)
- }
- return newBaseError(code, message, errs)
-}
-
-// NewBatchError returns an BatchedErrors with a collection of errors as an
-// array of errors.
-func NewBatchError(code, message string, errs []error) BatchedErrors {
- return newBaseError(code, message, errs)
-}
-
-// A RequestFailure is an interface to extract request failure information from
-// an Error such as the request ID of the failed request returned by a service.
-// RequestFailures may not always have a requestID value if the request failed
-// prior to reaching the service such as a connection error.
-//
-// Example:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if reqerr, ok := err.(RequestFailure); ok {
-// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
-// } else {
-// log.Println("Error:", err.Error())
-// }
-// }
-//
-// Combined with awserr.Error:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if awsErr, ok := err.(awserr.Error); ok {
-// // Generic AWS Error with Code, Message, and original error (if any)
-// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
-//
-// if reqErr, ok := err.(awserr.RequestFailure); ok {
-// // A service error occurred
-// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
-// }
-// } else {
-// fmt.Println(err.Error())
-// }
-// }
-//
-type RequestFailure interface {
- Error
-
- // The status code of the HTTP response.
- StatusCode() int
-
- // The request ID returned by the service for a request failure. This will
- // be empty if no request ID is available such as the request failed due
- // to a connection error.
- RequestID() string
-}
-
-// NewRequestFailure returns a new request error wrapper for the given Error
-// provided.
-func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
- return newRequestError(err, statusCode, reqID)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
deleted file mode 100644
index 0202a00..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package awserr
-
-import "fmt"
-
-// SprintError returns a string of the formatted error code.
-//
-// Both extra and origErr are optional. If they are included their lines
-// will be added, but if they are not included their lines will be ignored.
-func SprintError(code, message, extra string, origErr error) string {
- msg := fmt.Sprintf("%s: %s", code, message)
- if extra != "" {
- msg = fmt.Sprintf("%s\n\t%s", msg, extra)
- }
- if origErr != nil {
- msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
- }
- return msg
-}
-
-// A baseError wraps the code and message which defines an error. It also
-// can be used to wrap an original error object.
-//
-// Should be used as the root for errors satisfying the awserr.Error. Also
-// for any error which does not fit into a specific error wrapper type.
-type baseError struct {
- // Classification of error
- code string
-
- // Detailed information about error
- message string
-
- // Optional original error this error is based off of. Allows building
- // chained errors.
- errs []error
-}
-
-// newBaseError returns an error object for the code, message, and errors.
-//
-// code is a short no whitespace phrase depicting the classification of
-// the error that is being created.
-//
-// message is the free flow string containing detailed information about the
-// error.
-//
-// origErrs is the error objects which will be nested under the new errors to
-// be returned.
-func newBaseError(code, message string, origErrs []error) *baseError {
- b := &baseError{
- code: code,
- message: message,
- errs: origErrs,
- }
-
- return b
-}
-
-// Error returns the string representation of the error.
-//
-// See ErrorWithExtra for formatting.
-//
-// Satisfies the error interface.
-func (b baseError) Error() string {
- size := len(b.errs)
- if size > 0 {
- return SprintError(b.code, b.message, "", errorList(b.errs))
- }
-
- return SprintError(b.code, b.message, "", nil)
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (b baseError) String() string {
- return b.Error()
-}
-
-// Code returns the short phrase depicting the classification of the error.
-func (b baseError) Code() string {
- return b.code
-}
-
-// Message returns the error details message.
-func (b baseError) Message() string {
- return b.message
-}
-
-// OrigErr returns the original error if one was set. Nil is returned if no
-// error was set. This only returns the first element in the list. If the full
-// list is needed, use BatchedErrors.
-func (b baseError) OrigErr() error {
- switch len(b.errs) {
- case 0:
- return nil
- case 1:
- return b.errs[0]
- default:
- if err, ok := b.errs[0].(Error); ok {
- return NewBatchError(err.Code(), err.Message(), b.errs[1:])
- }
- return NewBatchError("BatchedErrors",
- "multiple errors occurred", b.errs)
- }
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (b baseError) OrigErrs() []error {
- return b.errs
-}
-
-// So that the Error interface type can be included as an anonymous field
-// in the requestError struct and not conflict with the error.Error() method.
-type awsError Error
-
-// A requestError wraps a request or service error.
-//
-// Composed of baseError for code, message, and original error.
-type requestError struct {
- awsError
- statusCode int
- requestID string
-}
-
-// newRequestError returns a wrapped error with additional information for
-// request status code, and service requestID.
-//
-// Should be used to wrap all request which involve service requests. Even if
-// the request failed without a service response, but had an HTTP status code
-// that may be meaningful.
-//
-// Also wraps original errors via the baseError.
-func newRequestError(err Error, statusCode int, requestID string) *requestError {
- return &requestError{
- awsError: err,
- statusCode: statusCode,
- requestID: requestID,
- }
-}
-
-// Error returns the string representation of the error.
-// Satisfies the error interface.
-func (r requestError) Error() string {
- extra := fmt.Sprintf("status code: %d, request id: %s",
- r.statusCode, r.requestID)
- return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (r requestError) String() string {
- return r.Error()
-}
-
-// StatusCode returns the wrapped status code for the error
-func (r requestError) StatusCode() int {
- return r.statusCode
-}
-
-// RequestID returns the wrapped requestID
-func (r requestError) RequestID() string {
- return r.requestID
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (r requestError) OrigErrs() []error {
- if b, ok := r.awsError.(BatchedErrors); ok {
- return b.OrigErrs()
- }
- return []error{r.OrigErr()}
-}
-
-// An error list that satisfies the golang interface
-type errorList []error
-
-// Error returns the string representation of the error.
-//
-// Satisfies the error interface.
-func (e errorList) Error() string {
- msg := ""
- // How do we want to handle the array size being zero
- if size := len(e); size > 0 {
- for i := 0; i < size; i++ {
- msg += fmt.Sprintf("%s", e[i].Error())
- // We check the next index to see if it is within the slice.
- // If it is, then we append a newline. We do this, because unit tests
- // could be broken with the additional '\n'
- if i+1 < size {
- msg += "\n"
- }
- }
- }
- return msg
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
deleted file mode 100644
index 1a3d106..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package awsutil
-
-import (
- "io"
- "reflect"
- "time"
-)
-
-// Copy deeply copies a src structure to dst. Useful for copying request and
-// response structures.
-//
-// Can copy between structs of different type, but will only copy fields which
-// are assignable, and exist in both structs. Fields which are not assignable,
-// or do not exist in both structs are ignored.
-func Copy(dst, src interface{}) {
- dstval := reflect.ValueOf(dst)
- if !dstval.IsValid() {
- panic("Copy dst cannot be nil")
- }
-
- rcopy(dstval, reflect.ValueOf(src), true)
-}
-
-// CopyOf returns a copy of src while also allocating the memory for dst.
-// src must be a pointer type or this operation will fail.
-func CopyOf(src interface{}) (dst interface{}) {
- dsti := reflect.New(reflect.TypeOf(src).Elem())
- dst = dsti.Interface()
- rcopy(dsti, reflect.ValueOf(src), true)
- return
-}
-
-// rcopy performs a recursive copy of values from the source to destination.
-//
-// root is used to skip certain aspects of the copy which are not valid
-// for the root node of a object.
-func rcopy(dst, src reflect.Value, root bool) {
- if !src.IsValid() {
- return
- }
-
- switch src.Kind() {
- case reflect.Ptr:
- if _, ok := src.Interface().(io.Reader); ok {
- if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
- dst.Elem().Set(src)
- } else if dst.CanSet() {
- dst.Set(src)
- }
- } else {
- e := src.Type().Elem()
- if dst.CanSet() && !src.IsNil() {
- if _, ok := src.Interface().(*time.Time); !ok {
- dst.Set(reflect.New(e))
- } else {
- tempValue := reflect.New(e)
- tempValue.Elem().Set(src.Elem())
- // Sets time.Time's unexported values
- dst.Set(tempValue)
- }
- }
- if src.Elem().IsValid() {
- // Keep the current root state since the depth hasn't changed
- rcopy(dst.Elem(), src.Elem(), root)
- }
- }
- case reflect.Struct:
- t := dst.Type()
- for i := 0; i < t.NumField(); i++ {
- name := t.Field(i).Name
- srcVal := src.FieldByName(name)
- dstVal := dst.FieldByName(name)
- if srcVal.IsValid() && dstVal.CanSet() {
- rcopy(dstVal, srcVal, false)
- }
- }
- case reflect.Slice:
- if src.IsNil() {
- break
- }
-
- s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
- dst.Set(s)
- for i := 0; i < src.Len(); i++ {
- rcopy(dst.Index(i), src.Index(i), false)
- }
- case reflect.Map:
- if src.IsNil() {
- break
- }
-
- s := reflect.MakeMap(src.Type())
- dst.Set(s)
- for _, k := range src.MapKeys() {
- v := src.MapIndex(k)
- v2 := reflect.New(v.Type()).Elem()
- rcopy(v2, v, false)
- dst.SetMapIndex(k, v2)
- }
- default:
- // Assign the value if possible. If its not assignable, the value would
- // need to be converted and the impact of that may be unexpected, or is
- // not compatible with the dst type.
- if src.Type().AssignableTo(dst.Type()) {
- dst.Set(src)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
deleted file mode 100644
index 0e75c5e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package awsutil_test
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/stretchr/testify/assert"
-)
-
-func ExampleCopy() {
- type Foo struct {
- A int
- B []*string
- }
-
- // Create the initial value
- str1 := "hello"
- str2 := "bye bye"
- f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
-
- // Do the copy
- var f2 Foo
- awsutil.Copy(&f2, f1)
-
- // Print the result
- fmt.Println(awsutil.Prettify(f2))
-
- // Output:
- // {
- // A: 1,
- // B: ["hello","bye bye"]
- // }
-}
-
-func TestCopy1(t *testing.T) {
- type Bar struct {
- a *int
- B *int
- c int
- D int
- }
- type Foo struct {
- A int
- B []*string
- C map[string]*int
- D *time.Time
- E *Bar
- }
-
- // Create the initial value
- str1 := "hello"
- str2 := "bye bye"
- int1 := 1
- int2 := 2
- intPtr1 := 1
- intPtr2 := 2
- now := time.Now()
- f1 := &Foo{
- A: 1,
- B: []*string{&str1, &str2},
- C: map[string]*int{
- "A": &int1,
- "B": &int2,
- },
- D: &now,
- E: &Bar{
- &intPtr1,
- &intPtr2,
- 2,
- 3,
- },
- }
-
- // Do the copy
- var f2 Foo
- awsutil.Copy(&f2, f1)
-
- // Values are equal
- assert.Equal(t, f2.A, f1.A)
- assert.Equal(t, f2.B, f1.B)
- assert.Equal(t, f2.C, f1.C)
- assert.Equal(t, f2.D, f1.D)
- assert.Equal(t, f2.E.B, f1.E.B)
- assert.Equal(t, f2.E.D, f1.E.D)
-
- // But pointers are not!
- str3 := "nothello"
- int3 := 57
- f2.A = 100
- *f2.B[0] = str3
- *f2.C["B"] = int3
- *f2.D = time.Now()
- f2.E.a = &int3
- *f2.E.B = int3
- f2.E.c = 5
- f2.E.D = 5
- assert.NotEqual(t, f2.A, f1.A)
- assert.NotEqual(t, f2.B, f1.B)
- assert.NotEqual(t, f2.C, f1.C)
- assert.NotEqual(t, f2.D, f1.D)
- assert.NotEqual(t, f2.E.a, f1.E.a)
- assert.NotEqual(t, f2.E.B, f1.E.B)
- assert.NotEqual(t, f2.E.c, f1.E.c)
- assert.NotEqual(t, f2.E.D, f1.E.D)
-}
-
-func TestCopyNestedWithUnexported(t *testing.T) {
- type Bar struct {
- a int
- B int
- }
- type Foo struct {
- A string
- B Bar
- }
-
- f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}
-
- var f2 Foo
- awsutil.Copy(&f2, f1)
-
- // Values match
- assert.Equal(t, f2.A, f1.A)
- assert.NotEqual(t, f2.B, f1.B)
- assert.NotEqual(t, f2.B.a, f1.B.a)
- assert.Equal(t, f2.B.B, f2.B.B)
-}
-
-func TestCopyIgnoreNilMembers(t *testing.T) {
- type Foo struct {
- A *string
- B []string
- C map[string]string
- }
-
- f := &Foo{}
- assert.Nil(t, f.A)
- assert.Nil(t, f.B)
- assert.Nil(t, f.C)
-
- var f2 Foo
- awsutil.Copy(&f2, f)
- assert.Nil(t, f2.A)
- assert.Nil(t, f2.B)
- assert.Nil(t, f2.C)
-
- fcopy := awsutil.CopyOf(f)
- f3 := fcopy.(*Foo)
- assert.Nil(t, f3.A)
- assert.Nil(t, f3.B)
- assert.Nil(t, f3.C)
-}
-
-func TestCopyPrimitive(t *testing.T) {
- str := "hello"
- var s string
- awsutil.Copy(&s, &str)
- assert.Equal(t, "hello", s)
-}
-
-func TestCopyNil(t *testing.T) {
- var s string
- awsutil.Copy(&s, nil)
- assert.Equal(t, "", s)
-}
-
-func TestCopyReader(t *testing.T) {
- var buf io.Reader = bytes.NewReader([]byte("hello world"))
- var r io.Reader
- awsutil.Copy(&r, buf)
- b, err := ioutil.ReadAll(r)
- assert.NoError(t, err)
- assert.Equal(t, []byte("hello world"), b)
-
- // empty bytes because this is not a deep copy
- b, err = ioutil.ReadAll(buf)
- assert.NoError(t, err)
- assert.Equal(t, []byte(""), b)
-}
-
-func TestCopyDifferentStructs(t *testing.T) {
- type SrcFoo struct {
- A int
- B []*string
- C map[string]*int
- SrcUnique string
- SameNameDiffType int
- unexportedPtr *int
- ExportedPtr *int
- }
- type DstFoo struct {
- A int
- B []*string
- C map[string]*int
- DstUnique int
- SameNameDiffType string
- unexportedPtr *int
- ExportedPtr *int
- }
-
- // Create the initial value
- str1 := "hello"
- str2 := "bye bye"
- int1 := 1
- int2 := 2
- f1 := &SrcFoo{
- A: 1,
- B: []*string{&str1, &str2},
- C: map[string]*int{
- "A": &int1,
- "B": &int2,
- },
- SrcUnique: "unique",
- SameNameDiffType: 1,
- unexportedPtr: &int1,
- ExportedPtr: &int2,
- }
-
- // Do the copy
- var f2 DstFoo
- awsutil.Copy(&f2, f1)
-
- // Values are equal
- assert.Equal(t, f2.A, f1.A)
- assert.Equal(t, f2.B, f1.B)
- assert.Equal(t, f2.C, f1.C)
- assert.Equal(t, "unique", f1.SrcUnique)
- assert.Equal(t, 1, f1.SameNameDiffType)
- assert.Equal(t, 0, f2.DstUnique)
- assert.Equal(t, "", f2.SameNameDiffType)
- assert.Equal(t, int1, *f1.unexportedPtr)
- assert.Nil(t, f2.unexportedPtr)
- assert.Equal(t, int2, *f1.ExportedPtr)
- assert.Equal(t, int2, *f2.ExportedPtr)
-}
-
-func ExampleCopyOf() {
- type Foo struct {
- A int
- B []*string
- }
-
- // Create the initial value
- str1 := "hello"
- str2 := "bye bye"
- f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
-
- // Do the copy
- v := awsutil.CopyOf(f1)
- var f2 *Foo = v.(*Foo)
-
- // Print the result
- fmt.Println(awsutil.Prettify(f2))
-
- // Output:
- // {
- // A: 1,
- // B: ["hello","bye bye"]
- // }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
deleted file mode 100644
index 59fa4a5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package awsutil
-
-import (
- "reflect"
-)
-
-// DeepEqual returns whether the two values are deeply equal like reflect.DeepEqual.
-// In addition to this, this method will also dereference the input values if
-// possible so the DeepEqual performed will not fail if one parameter is a
-// pointer and the other is not.
-//
-// DeepEqual will not perform indirection of nested values of the input parameters.
-func DeepEqual(a, b interface{}) bool {
- ra := reflect.Indirect(reflect.ValueOf(a))
- rb := reflect.Indirect(reflect.ValueOf(b))
-
- if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
-		// If the elements are both nil, and of the same type they are equal
- // If they are of different types they are not equal
- return reflect.TypeOf(a) == reflect.TypeOf(b)
- } else if raValid != rbValid {
- // Both values must be valid to be equal
- return false
- }
-
- return reflect.DeepEqual(ra.Interface(), rb.Interface())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go
deleted file mode 100644
index 7a5db6e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package awsutil_test
-
-import (
- "testing"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/stretchr/testify/assert"
-)
-
-func TestDeepEqual(t *testing.T) {
- cases := []struct {
- a, b interface{}
- equal bool
- }{
- {"a", "a", true},
- {"a", "b", false},
- {"a", aws.String(""), false},
- {"a", nil, false},
- {"a", aws.String("a"), true},
- {(*bool)(nil), (*bool)(nil), true},
- {(*bool)(nil), (*string)(nil), false},
- {nil, nil, true},
- }
-
- for i, c := range cases {
- assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
deleted file mode 100644
index 11c52c3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package awsutil
-
-import (
- "reflect"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/jmespath/go-jmespath"
-)
-
-var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
-
-// rValuesAtPath returns a slice of values found in value v. The values
-// in v are explored recursively so all nested values are collected.
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
- pathparts := strings.Split(path, "||")
- if len(pathparts) > 1 {
- for _, pathpart := range pathparts {
- vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
- if len(vals) > 0 {
- return vals
- }
- }
- return nil
- }
-
- values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
- components := strings.Split(path, ".")
- for len(values) > 0 && len(components) > 0 {
- var index *int64
- var indexStar bool
- c := strings.TrimSpace(components[0])
- if c == "" { // no actual component, illegal syntax
- return nil
- } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
- // TODO normalize case for user
- return nil // don't support unexported fields
- }
-
- // parse this component
- if m := indexRe.FindStringSubmatch(c); m != nil {
- c = m[1]
- if m[2] == "" {
- index = nil
- indexStar = true
- } else {
- i, _ := strconv.ParseInt(m[2], 10, 32)
- index = &i
- indexStar = false
- }
- }
-
- nextvals := []reflect.Value{}
- for _, value := range values {
- // pull component name out of struct member
- if value.Kind() != reflect.Struct {
- continue
- }
-
- if c == "*" { // pull all members
- for i := 0; i < value.NumField(); i++ {
- if f := reflect.Indirect(value.Field(i)); f.IsValid() {
- nextvals = append(nextvals, f)
- }
- }
- continue
- }
-
- value = value.FieldByNameFunc(func(name string) bool {
- if c == name {
- return true
- } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
- return true
- }
- return false
- })
-
- if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
- if !value.IsNil() {
- value.Set(reflect.Zero(value.Type()))
- }
- return []reflect.Value{value}
- }
-
- if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
- // TODO if the value is the terminus it should not be created
- // if the value to be set to its position is nil.
- value.Set(reflect.New(value.Type().Elem()))
- value = value.Elem()
- } else {
- value = reflect.Indirect(value)
- }
-
- if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
- if !createPath && value.IsNil() {
- value = reflect.ValueOf(nil)
- }
- }
-
- if value.IsValid() {
- nextvals = append(nextvals, value)
- }
- }
- values = nextvals
-
- if indexStar || index != nil {
- nextvals = []reflect.Value{}
- for _, valItem := range values {
- value := reflect.Indirect(valItem)
- if value.Kind() != reflect.Slice {
- continue
- }
-
- if indexStar { // grab all indices
- for i := 0; i < value.Len(); i++ {
- idx := reflect.Indirect(value.Index(i))
- if idx.IsValid() {
- nextvals = append(nextvals, idx)
- }
- }
- continue
- }
-
- // pull out index
- i := int(*index)
- if i >= value.Len() { // check out of bounds
- if createPath {
- // TODO resize slice
- } else {
- continue
- }
- } else if i < 0 { // support negative indexing
- i = value.Len() + i
- }
- value = reflect.Indirect(value.Index(i))
-
- if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
- if !createPath && value.IsNil() {
- value = reflect.ValueOf(nil)
- }
- }
-
- if value.IsValid() {
- nextvals = append(nextvals, value)
- }
- }
- values = nextvals
- }
-
- components = components[1:]
- }
- return values
-}
-
-// ValuesAtPath returns a list of values at the case insensitive lexical
-// path inside of a structure.
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
- result, err := jmespath.Search(path, i)
- if err != nil {
- return nil, err
- }
-
- v := reflect.ValueOf(result)
- if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return nil, nil
- }
- if s, ok := result.([]interface{}); ok {
- return s, err
- }
- if v.Kind() == reflect.Map && v.Len() == 0 {
- return nil, nil
- }
- if v.Kind() == reflect.Slice {
- out := make([]interface{}, v.Len())
- for i := 0; i < v.Len(); i++ {
- out[i] = v.Index(i).Interface()
- }
- return out, nil
- }
-
- return []interface{}{result}, nil
-}
-
-// SetValueAtPath sets a value at the case insensitive lexical path inside
-// of a structure.
-func SetValueAtPath(i interface{}, path string, v interface{}) {
- if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
- for _, rval := range rvals {
- if rval.Kind() == reflect.Ptr && rval.IsNil() {
- continue
- }
- setValue(rval, v)
- }
- }
-}
-
-func setValue(dstVal reflect.Value, src interface{}) {
- if dstVal.Kind() == reflect.Ptr {
- dstVal = reflect.Indirect(dstVal)
- }
- srcVal := reflect.ValueOf(src)
-
- if !srcVal.IsValid() { // src is literal nil
- if dstVal.CanAddr() {
- // Convert to pointer so that pointer's value can be nil'ed
- // dstVal = dstVal.Addr()
- }
- dstVal.Set(reflect.Zero(dstVal.Type()))
-
- } else if srcVal.Kind() == reflect.Ptr {
- if srcVal.IsNil() {
- srcVal = reflect.Zero(dstVal.Type())
- } else {
- srcVal = reflect.ValueOf(src).Elem()
- }
- dstVal.Set(srcVal)
- } else {
- dstVal.Set(srcVal)
- }
-
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
deleted file mode 100644
index b222556..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package awsutil_test
-
-import (
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/stretchr/testify/assert"
-)
-
-type Struct struct {
- A []Struct
- z []Struct
- B *Struct
- D *Struct
- C string
- E map[string]string
-}
-
-var data = Struct{
- A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
- z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
- B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
- C: "initial",
-}
-var data2 = Struct{A: []Struct{
- {A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
- {A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
-}}
-
-func TestValueAtPathSuccess(t *testing.T) {
- var testCases = []struct {
- expect []interface{}
- data interface{}
- path string
- }{
- {[]interface{}{"initial"}, data, "C"},
- {[]interface{}{"value1"}, data, "A[0].C"},
- {[]interface{}{"value2"}, data, "A[1].C"},
- {[]interface{}{"value3"}, data, "A[2].C"},
- {[]interface{}{"value3"}, data, "a[2].c"},
- {[]interface{}{"value3"}, data, "A[-1].C"},
- {[]interface{}{"value1", "value2", "value3"}, data, "A[].C"},
- {[]interface{}{"terminal"}, data, "B . B . C"},
- {[]interface{}{"initial"}, data, "A.D.X || C"},
- {[]interface{}{"initial"}, data, "A[0].B || C"},
- {[]interface{}{
- Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
- Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
- }, data2, "A"},
- }
- for i, c := range testCases {
- v, err := awsutil.ValuesAtPath(c.data, c.path)
- assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
- assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
- }
-}
-
-func TestValueAtPathFailure(t *testing.T) {
- var testCases = []struct {
- expect []interface{}
- errContains string
- data interface{}
- path string
- }{
- {nil, "", data, "C.x"},
- {nil, "SyntaxError: Invalid token: tDot", data, ".x"},
- {nil, "", data, "X.Y.Z"},
- {nil, "", data, "A[100].C"},
- {nil, "", data, "A[3].C"},
- {nil, "", data, "B.B.C.Z"},
- {nil, "", data, "z[-1].C"},
- {nil, "", nil, "A.B.C"},
- {[]interface{}{}, "", Struct{}, "A"},
- {nil, "", data, "A[0].B.C"},
- {nil, "", data, "D"},
- }
-
- for i, c := range testCases {
- v, err := awsutil.ValuesAtPath(c.data, c.path)
- if c.errContains != "" {
- assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path)
- continue
- } else {
- assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
- }
- assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
- }
-}
-
-func TestSetValueAtPathSuccess(t *testing.T) {
- var s Struct
- awsutil.SetValueAtPath(&s, "C", "test1")
- awsutil.SetValueAtPath(&s, "B.B.C", "test2")
- awsutil.SetValueAtPath(&s, "B.D.C", "test3")
- assert.Equal(t, "test1", s.C)
- assert.Equal(t, "test2", s.B.B.C)
- assert.Equal(t, "test3", s.B.D.C)
-
- awsutil.SetValueAtPath(&s, "B.*.C", "test0")
- assert.Equal(t, "test0", s.B.B.C)
- assert.Equal(t, "test0", s.B.D.C)
-
- var s2 Struct
- awsutil.SetValueAtPath(&s2, "b.b.c", "test0")
- assert.Equal(t, "test0", s2.B.B.C)
- awsutil.SetValueAtPath(&s2, "A", []Struct{{}})
- assert.Equal(t, []Struct{{}}, s2.A)
-
- str := "foo"
-
- s3 := Struct{}
- awsutil.SetValueAtPath(&s3, "b.b.c", str)
- assert.Equal(t, "foo", s3.B.B.C)
-
- s3 = Struct{B: &Struct{B: &Struct{C: str}}}
- awsutil.SetValueAtPath(&s3, "b.b.c", nil)
- assert.Equal(t, "", s3.B.B.C)
-
- s3 = Struct{}
- awsutil.SetValueAtPath(&s3, "b.b.c", nil)
- assert.Equal(t, "", s3.B.B.C)
-
- s3 = Struct{}
- awsutil.SetValueAtPath(&s3, "b.b.c", &str)
- assert.Equal(t, "foo", s3.B.B.C)
-
- var s4 struct{ Name *string }
- awsutil.SetValueAtPath(&s4, "Name", str)
- assert.Equal(t, str, *s4.Name)
-
- s4 = struct{ Name *string }{}
- awsutil.SetValueAtPath(&s4, "Name", nil)
- assert.Equal(t, (*string)(nil), s4.Name)
-
- s4 = struct{ Name *string }{Name: &str}
- awsutil.SetValueAtPath(&s4, "Name", nil)
- assert.Equal(t, (*string)(nil), s4.Name)
-
- s4 = struct{ Name *string }{}
- awsutil.SetValueAtPath(&s4, "Name", &str)
- assert.Equal(t, str, *s4.Name)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
deleted file mode 100644
index fc38172..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package awsutil
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "strings"
-)
-
-// Prettify returns the string representation of a value.
-func Prettify(i interface{}) string {
- var buf bytes.Buffer
- prettify(reflect.ValueOf(i), 0, &buf)
- return buf.String()
-}
-
-// prettify will recursively walk value v to build a textual
-// representation of the value.
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
- for v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- switch v.Kind() {
- case reflect.Struct:
- strtype := v.Type().String()
- if strtype == "time.Time" {
- fmt.Fprintf(buf, "%s", v.Interface())
- break
- } else if strings.HasPrefix(strtype, "io.") {
- buf.WriteString("")
- break
- }
-
- buf.WriteString("{\n")
-
- names := []string{}
- for i := 0; i < v.Type().NumField(); i++ {
- name := v.Type().Field(i).Name
- f := v.Field(i)
- if name[0:1] == strings.ToLower(name[0:1]) {
- continue // ignore unexported fields
- }
- if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
- continue // ignore unset fields
- }
- names = append(names, name)
- }
-
- for i, n := range names {
- val := v.FieldByName(n)
- buf.WriteString(strings.Repeat(" ", indent+2))
- buf.WriteString(n + ": ")
- prettify(val, indent+2, buf)
-
- if i < len(names)-1 {
- buf.WriteString(",\n")
- }
- }
-
- buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
- case reflect.Slice:
- nl, id, id2 := "", "", ""
- if v.Len() > 3 {
- nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
- }
- buf.WriteString("[" + nl)
- for i := 0; i < v.Len(); i++ {
- buf.WriteString(id2)
- prettify(v.Index(i), indent+2, buf)
-
- if i < v.Len()-1 {
- buf.WriteString("," + nl)
- }
- }
-
- buf.WriteString(nl + id + "]")
- case reflect.Map:
- buf.WriteString("{\n")
-
- for i, k := range v.MapKeys() {
- buf.WriteString(strings.Repeat(" ", indent+2))
- buf.WriteString(k.String() + ": ")
- prettify(v.MapIndex(k), indent+2, buf)
-
- if i < v.Len()-1 {
- buf.WriteString(",\n")
- }
- }
-
- buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
- default:
- if !v.IsValid() {
- fmt.Fprint(buf, "")
- return
- }
- format := "%v"
- switch v.Interface().(type) {
- case string:
- format = "%q"
- case io.ReadSeeker, io.Reader:
- format = "buffer(%p)"
- }
- fmt.Fprintf(buf, format, v.Interface())
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
deleted file mode 100644
index b6432f1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package awsutil
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strings"
-)
-
-// StringValue returns the string representation of a value.
-func StringValue(i interface{}) string {
- var buf bytes.Buffer
- stringValue(reflect.ValueOf(i), 0, &buf)
- return buf.String()
-}
-
-func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
- for v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- switch v.Kind() {
- case reflect.Struct:
- buf.WriteString("{\n")
-
- names := []string{}
- for i := 0; i < v.Type().NumField(); i++ {
- name := v.Type().Field(i).Name
- f := v.Field(i)
- if name[0:1] == strings.ToLower(name[0:1]) {
- continue // ignore unexported fields
- }
- if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
- continue // ignore unset fields
- }
- names = append(names, name)
- }
-
- for i, n := range names {
- val := v.FieldByName(n)
- buf.WriteString(strings.Repeat(" ", indent+2))
- buf.WriteString(n + ": ")
- stringValue(val, indent+2, buf)
-
- if i < len(names)-1 {
- buf.WriteString(",\n")
- }
- }
-
- buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
- case reflect.Slice:
- nl, id, id2 := "", "", ""
- if v.Len() > 3 {
- nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
- }
- buf.WriteString("[" + nl)
- for i := 0; i < v.Len(); i++ {
- buf.WriteString(id2)
- stringValue(v.Index(i), indent+2, buf)
-
- if i < v.Len()-1 {
- buf.WriteString("," + nl)
- }
- }
-
- buf.WriteString(nl + id + "]")
- case reflect.Map:
- buf.WriteString("{\n")
-
- for i, k := range v.MapKeys() {
- buf.WriteString(strings.Repeat(" ", indent+2))
- buf.WriteString(k.String() + ": ")
- stringValue(v.MapIndex(k), indent+2, buf)
-
- if i < v.Len()-1 {
- buf.WriteString(",\n")
- }
- }
-
- buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
- default:
- format := "%v"
- switch v.Interface().(type) {
- case string:
- format = "%q"
- }
- fmt.Fprintf(buf, format, v.Interface())
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
deleted file mode 100644
index 7c0e7d9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package client
-
-import (
- "fmt"
- "net/http/httputil"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// A Config provides configuration to a service client instance.
-type Config struct {
- Config *aws.Config
- Handlers request.Handlers
- Endpoint, SigningRegion string
-}
-
-// ConfigProvider provides a generic way for a service client to receive
-// the ClientConfig without circular dependencies.
-type ConfigProvider interface {
- ClientConfig(serviceName string, cfgs ...*aws.Config) Config
-}
-
-// A Client implements the base client request and response handling
-// used by all service clients.
-type Client struct {
- request.Retryer
- metadata.ClientInfo
-
- Config aws.Config
- Handlers request.Handlers
-}
-
-// New will return a pointer to a new initialized service client.
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
- svc := &Client{
- Config: cfg,
- ClientInfo: info,
- Handlers: handlers,
- }
-
- switch retryer, ok := cfg.Retryer.(request.Retryer); {
- case ok:
- svc.Retryer = retryer
- case cfg.Retryer != nil && cfg.Logger != nil:
- s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
- cfg.Logger.Log(s)
- fallthrough
- default:
- maxRetries := aws.IntValue(cfg.MaxRetries)
- if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
- maxRetries = 3
- }
- svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
- }
-
- svc.AddDebugHandlers()
-
- for _, option := range options {
- option(svc)
- }
-
- return svc
-}
-
-// NewRequest returns a new Request pointer for the service API
-// operation and parameters.
-func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
- return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
-}
-
-// AddDebugHandlers injects debug logging handlers into the service to log request
-// debug information.
-func (c *Client) AddDebugHandlers() {
- if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
- return
- }
-
- c.Handlers.Send.PushFront(logRequest)
- c.Handlers.Send.PushBack(logResponse)
-}
-
-const logReqMsg = `DEBUG: Request %s/%s Details:
----[ REQUEST POST-SIGN ]-----------------------------
-%s
------------------------------------------------------`
-
-const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
----[ REQUEST DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-func logRequest(r *request.Request) {
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- if logBody {
- // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
- // Body as a NoOpCloser and will not be reset after read by the HTTP
- // client reader.
- r.ResetBody()
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
----[ RESPONSE DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-func logResponse(r *request.Request) {
- var msg = "no response data"
- if r.HTTPResponse != nil {
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- msg = string(dumpedBody)
- } else if r.Error != nil {
- msg = r.Error.Error()
- }
- r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
-}
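AddDebugHandlers in the deleted client.go only attaches the logRequest/logResponse handlers when the configured log level is at least LogDebug. A small sketch, assuming the usual session/SQS wiring (none of it taken from this repo), of turning that logging on via aws.Config:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// LogDebugWithHTTPBody implies LogDebug, so client.New installs the
	// request/response dump handlers and full HTTP bodies are logged.
	sess, err := session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithLogLevel(aws.LogDebugWithHTTPBody))
	if err != nil {
		log.Fatal(err)
	}

	svc := sqs.New(sess)
	_ = svc // every API call made through svc is now dumped to the logger
}
```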
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
deleted file mode 100644
index 43a3676..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package client
-
-import (
- "math/rand"
- "sync"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
-//
-// type retryer struct {
-// service.DefaultRetryer
-// }
-//
-// // This implementation always has 100 max retries
-// func (d retryer) MaxRetries() int { return 100 }
-type DefaultRetryer struct {
- NumMaxRetries int
-}
-
-// MaxRetries returns the maximum number of retries the service will use to
-// make an individual API request.
-func (d DefaultRetryer) MaxRetries() int {
- return d.NumMaxRetries
-}
-
-var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
-
-// RetryRules returns the delay duration before retrying this request again
-func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
- // Set the upper limit of delay in retrying at ~five minutes
- minTime := 30
- throttle := d.shouldThrottle(r)
- if throttle {
- minTime = 500
- }
-
- retryCount := r.RetryCount
- if retryCount > 13 {
- retryCount = 13
- } else if throttle && retryCount > 8 {
- retryCount = 8
- }
-
- delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
- return time.Duration(delay) * time.Millisecond
-}
-
-// ShouldRetry returns true if the request should be retried.
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
- if r.HTTPResponse.StatusCode >= 500 {
- return true
- }
- return r.IsErrorRetryable() || d.shouldThrottle(r)
-}
-
-// ShouldThrottle returns true if the request should be throttled.
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
- if r.HTTPResponse.StatusCode == 502 ||
- r.HTTPResponse.StatusCode == 503 ||
- r.HTTPResponse.StatusCode == 504 {
- return true
- }
- return r.IsErrorThrottle()
-}
-
-// lockedSource is a thread-safe implementation of rand.Source
-type lockedSource struct {
- lk sync.Mutex
- src rand.Source
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
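The doc comment on DefaultRetryer sketches overriding a single method; here is a slightly fuller, hedged version that caps retries at 100 while inheriting RetryRules and ShouldRetry from the embedded DefaultRetryer, wired in through the Config.Retryer field that appears in config.go further down:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
)

// capRetryer embeds client.DefaultRetryer, so RetryRules and ShouldRetry keep
// their exponential-backoff behaviour; only MaxRetries is overridden.
type capRetryer struct {
	client.DefaultRetryer
}

// MaxRetries always allows up to 100 attempts.
func (capRetryer) MaxRetries() int { return 100 }

func newConfig() *aws.Config {
	// Config.Retryer is an empty interface (RequestRetryer); client.New
	// checks at runtime that the value implements request.Retryer.
	cfg := aws.NewConfig().WithRegion("us-east-1")
	cfg.Retryer = capRetryer{}
	return cfg
}

func main() {
	_ = newConfig()
}
```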
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
deleted file mode 100644
index 4778056..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package metadata
-
-// ClientInfo wraps immutable data from the client.Client structure.
-type ClientInfo struct {
- ServiceName string
- APIVersion string
- Endpoint string
- SigningName string
- SigningRegion string
- JSONVersion string
- TargetPrefix string
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
deleted file mode 100644
index 34c2bab..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package aws
-
-import (
- "net/http"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-// UseServiceDefaultRetries instructs the config to use the service's own
-// default number of retries. This will be the default action if
-// Config.MaxRetries is nil also.
-const UseServiceDefaultRetries = -1
-
-// RequestRetryer is an alias for a type that implements the request.Retryer
-// interface.
-type RequestRetryer interface{}
-
-// A Config provides service configuration for service clients. By default,
-// all clients will use the defaults.DefaultConfig structure.
-//
-// // Create Session with MaxRetry configuration to be shared by multiple
-// // service clients.
-// sess, err := session.NewSession(&aws.Config{
-// MaxRetries: aws.Int(3),
-// })
-//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"),
-// })
-type Config struct {
- // Enables verbose error printing of all credential chain errors.
- // Should be used when wanting to see all errors while attempting to
- // retrieve credentials.
- CredentialsChainVerboseErrors *bool
-
- // The credentials object to use when signing requests. Defaults to a
- // chain of credential providers to search for credentials in environment
- // variables, shared credential file, and EC2 Instance Roles.
- Credentials *credentials.Credentials
-
- // An optional endpoint URL (hostname only or fully qualified URI)
- // that overrides the default generated endpoint for a client. Set this
- // to `""` to use the default generated endpoint.
- //
- // @note You must still provide a `Region` value when specifying an
- // endpoint for a client.
- Endpoint *string
-
- // The region to send requests to. This parameter is required and must
- // be configured globally or on a per-client basis unless otherwise
- // noted. A full list of regions is found in the "Regions and Endpoints"
- // document.
- //
- // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
- // AWS Regions and Endpoints
- Region *string
-
- // Set this to `true` to disable SSL when sending requests. Defaults
- // to `false`.
- DisableSSL *bool
-
- // The HTTP client to use when sending requests. Defaults to
- // `http.DefaultClient`.
- HTTPClient *http.Client
-
- // An integer value representing the logging level. The default log level
- // is zero (LogOff), which represents no logging. To enable logging set
- // to a LogLevel Value.
- LogLevel *LogLevelType
-
- // The logger writer interface to write logging messages to. Defaults to
- // standard out.
- Logger Logger
-
- // The maximum number of times that a request will be retried for failures.
- // Defaults to -1, which defers the max retry setting to the service
- // specific configuration.
- MaxRetries *int
-
- // Retryer guides how HTTP requests should be retried in case of
- // recoverable failures.
- //
- // When nil or the value does not implement the request.Retryer interface,
- // the request.DefaultRetryer will be used.
- //
- // When both Retryer and MaxRetries are non-nil, the former is used and
- // the latter ignored.
- //
- // To set the Retryer field in a type-safe manner and with chaining, use
- // the request.WithRetryer helper function:
- //
- // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
- //
- Retryer RequestRetryer
-
- // Disables semantic parameter validation, which validates input for
- // missing required fields and/or other semantic request input errors.
- DisableParamValidation *bool
-
- // Disables the computation of request and response checksums, e.g.,
- // CRC32 checksums in Amazon DynamoDB.
- DisableComputeChecksums *bool
-
- // Set this to `true` to force the request to use path-style addressing,
- // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
- // will use virtual hosted bucket addressing when possible
- // (`http://BUCKET.s3.amazonaws.com/KEY`).
- //
- // @note This configuration option is specific to the Amazon S3 service.
- // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
- // Amazon S3: Virtual Hosting of Buckets
- S3ForcePathStyle *bool
-
- // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
- // header to PUT requests over 2MB of content. 100-Continue instructs the
- // HTTP client not to send the body until the service responds with a
- // `continue` status. This is useful to prevent sending the request body
- // until after the request is authenticated, and validated.
- //
- // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
- //
- // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
- // `ExpectContinueTimeout` for information on adjusting the continue wait
- // timeout. https://golang.org/pkg/net/http/#Transport
- //
-	// You should use this flag to disable 100-Continue if you experience issues
- // with proxies or third party S3 compatible services.
- S3Disable100Continue *bool
-
-	// Set this to `true` to enable the S3 Accelerate feature. All operations
-	// compatible with S3 Accelerate will use the accelerate endpoint for
-	// requests. Requests not compatible will fall back to normal S3 requests.
-	//
-	// The bucket must be enabled for Accelerate to be used with an S3 client
-	// that has accelerate enabled. If the bucket is not enabled for Accelerate,
-	// an error will be returned. The bucket name must be DNS compatible to
-	// also work with accelerate.
- S3UseAccelerate *bool
-
- // Set this to `true` to disable the EC2Metadata client from overriding the
- // default http.Client's Timeout. This is helpful if you do not want the
-	// EC2Metadata client to create a new http.Client. This option is only
- // meaningful if you're not already using a custom HTTP client with the
- // SDK. Enabled by default.
- //
- // Must be set and provided to the session.NewSession() in order to disable
- // the EC2Metadata overriding the timeout for default credentials chain.
- //
- // Example:
-	// sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
- //
- // svc := s3.New(sess)
- //
- EC2MetadataDisableTimeoutOverride *bool
-
-	// Instructs the endpoint to be generated for a service client to
-	// be the dual stack endpoint. The dual stack endpoint will support
-	// both IPv4 and IPv6 addressing.
-	//
-	// Setting this for a service which does not support dual stack will fail
-	// to make requests. It is not recommended to set this value on the session,
-	// as it will apply to all service clients created with the session, even
-	// services which don't support dual stack endpoints.
- //
- // If the Endpoint config value is also provided the UseDualStack flag
- // will be ignored.
- //
- // Only supported with.
- //
- // sess, err := session.NewSession()
- //
- // svc := s3.New(sess, &aws.Config{
- // UseDualStack: aws.Bool(true),
- // })
- UseDualStack *bool
-
- // SleepDelay is an override for the func the SDK will call when sleeping
- // during the lifecycle of a request. Specifically this will be used for
- // request delays. This value should only be used for testing. To adjust
- // the delay of a request see the aws/client.DefaultRetryer and
- // aws/request.Retryer.
- SleepDelay func(time.Duration)
-}
-
-// NewConfig returns a new Config pointer that can be chained with builder
-// methods to set multiple configuration values inline without using pointers.
-//
-// // Create Session with MaxRetry configuration to be shared by multiple
-// // service clients.
-// sess, err := session.NewSession(aws.NewConfig().
-// WithMaxRetries(3),
-// )
-//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, aws.NewConfig().
-// WithRegion("us-west-2"),
-// )
-func NewConfig() *Config {
- return &Config{}
-}
-
-// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
-// value, returning a Config pointer for chaining.
-func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
- c.CredentialsChainVerboseErrors = &verboseErrs
- return c
-}
-
-// WithCredentials sets a config Credentials value returning a Config pointer
-// for chaining.
-func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
- c.Credentials = creds
- return c
-}
-
-// WithEndpoint sets a config Endpoint value returning a Config pointer for
-// chaining.
-func (c *Config) WithEndpoint(endpoint string) *Config {
- c.Endpoint = &endpoint
- return c
-}
-
-// WithRegion sets a config Region value returning a Config pointer for
-// chaining.
-func (c *Config) WithRegion(region string) *Config {
-	c.Region = &region
- return c
-}
-
-// WithDisableSSL sets a config DisableSSL value returning a Config pointer
-// for chaining.
-func (c *Config) WithDisableSSL(disable bool) *Config {
- c.DisableSSL = &disable
- return c
-}
-
-// WithHTTPClient sets a config HTTPClient value returning a Config pointer
-// for chaining.
-func (c *Config) WithHTTPClient(client *http.Client) *Config {
- c.HTTPClient = client
- return c
-}
-
-// WithMaxRetries sets a config MaxRetries value returning a Config pointer
-// for chaining.
-func (c *Config) WithMaxRetries(max int) *Config {
- c.MaxRetries = &max
- return c
-}
-
-// WithDisableParamValidation sets a config DisableParamValidation value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableParamValidation(disable bool) *Config {
- c.DisableParamValidation = &disable
- return c
-}
-
-// WithDisableComputeChecksums sets a config DisableComputeChecksums value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
- c.DisableComputeChecksums = &disable
- return c
-}
-
-// WithLogLevel sets a config LogLevel value returning a Config pointer for
-// chaining.
-func (c *Config) WithLogLevel(level LogLevelType) *Config {
- c.LogLevel = &level
- return c
-}
-
-// WithLogger sets a config Logger value returning a Config pointer for
-// chaining.
-func (c *Config) WithLogger(logger Logger) *Config {
- c.Logger = logger
- return c
-}
-
-// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
-// pointer for chaining.
-func (c *Config) WithS3ForcePathStyle(force bool) *Config {
- c.S3ForcePathStyle = &force
- return c
-}
-
-// WithS3Disable100Continue sets a config S3Disable100Continue value returning
-// a Config pointer for chaining.
-func (c *Config) WithS3Disable100Continue(disable bool) *Config {
- c.S3Disable100Continue = &disable
- return c
-}
-
-// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
-// pointer for chaining.
-func (c *Config) WithS3UseAccelerate(enable bool) *Config {
- c.S3UseAccelerate = &enable
- return c
-}
-
-// WithUseDualStack sets a config UseDualStack value returning a Config
-// pointer for chaining.
-func (c *Config) WithUseDualStack(enable bool) *Config {
- c.UseDualStack = &enable
- return c
-}
-
-// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
-// returning a Config pointer for chaining.
-func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
- c.EC2MetadataDisableTimeoutOverride = &enable
- return c
-}
-
-// WithSleepDelay overrides the function used to sleep while waiting for the
-// next retry. Defaults to time.Sleep.
-func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
- c.SleepDelay = fn
- return c
-}
-
-// MergeIn merges the passed in configs into the existing config object.
-func (c *Config) MergeIn(cfgs ...*Config) {
- for _, other := range cfgs {
- mergeInConfig(c, other)
- }
-}
-
-func mergeInConfig(dst *Config, other *Config) {
- if other == nil {
- return
- }
-
- if other.CredentialsChainVerboseErrors != nil {
- dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
- }
-
- if other.Credentials != nil {
- dst.Credentials = other.Credentials
- }
-
- if other.Endpoint != nil {
- dst.Endpoint = other.Endpoint
- }
-
- if other.Region != nil {
- dst.Region = other.Region
- }
-
- if other.DisableSSL != nil {
- dst.DisableSSL = other.DisableSSL
- }
-
- if other.HTTPClient != nil {
- dst.HTTPClient = other.HTTPClient
- }
-
- if other.LogLevel != nil {
- dst.LogLevel = other.LogLevel
- }
-
- if other.Logger != nil {
- dst.Logger = other.Logger
- }
-
- if other.MaxRetries != nil {
- dst.MaxRetries = other.MaxRetries
- }
-
- if other.Retryer != nil {
- dst.Retryer = other.Retryer
- }
-
- if other.DisableParamValidation != nil {
- dst.DisableParamValidation = other.DisableParamValidation
- }
-
- if other.DisableComputeChecksums != nil {
- dst.DisableComputeChecksums = other.DisableComputeChecksums
- }
-
- if other.S3ForcePathStyle != nil {
- dst.S3ForcePathStyle = other.S3ForcePathStyle
- }
-
- if other.S3Disable100Continue != nil {
- dst.S3Disable100Continue = other.S3Disable100Continue
- }
-
- if other.S3UseAccelerate != nil {
- dst.S3UseAccelerate = other.S3UseAccelerate
- }
-
- if other.UseDualStack != nil {
- dst.UseDualStack = other.UseDualStack
- }
-
- if other.EC2MetadataDisableTimeoutOverride != nil {
- dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
- }
-
- if other.SleepDelay != nil {
- dst.SleepDelay = other.SleepDelay
- }
-}
-
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
-func (c *Config) Copy(cfgs ...*Config) *Config {
- dst := &Config{}
- dst.MergeIn(c)
-
- for _, cfg := range cfgs {
- dst.MergeIn(cfg)
- }
-
- return dst
-}
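MergeIn (and Copy, which is built on it) only copies fields that are non-nil on the source config, so a later Config can selectively override an earlier one. A small sketch with made-up values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().
		WithRegion("us-east-1").
		WithMaxRetries(3)

	// Only Region is set here, so merging leaves MaxRetries untouched.
	override := &aws.Config{Region: aws.String("eu-west-1")}

	merged := base.Copy(override) // shallow-copies base, then merges override
	fmt.Println(aws.StringValue(merged.Region), aws.IntValue(merged.MaxRetries))
	// Output: eu-west-1 3
}
```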
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config_test.go b/vendor/github.com/aws/aws-sdk-go/aws/config_test.go
deleted file mode 100644
index fe97a31..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/config_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package aws
-
-import (
- "net/http"
- "reflect"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
-
-var copyTestConfig = Config{
- Credentials: testCredentials,
- Endpoint: String("CopyTestEndpoint"),
- Region: String("COPY_TEST_AWS_REGION"),
- DisableSSL: Bool(true),
- HTTPClient: http.DefaultClient,
- LogLevel: LogLevel(LogDebug),
- Logger: NewDefaultLogger(),
- MaxRetries: Int(3),
- DisableParamValidation: Bool(true),
- DisableComputeChecksums: Bool(true),
- S3ForcePathStyle: Bool(true),
-}
-
-func TestCopy(t *testing.T) {
- want := copyTestConfig
- got := copyTestConfig.Copy()
- if !reflect.DeepEqual(*got, want) {
- t.Errorf("Copy() = %+v", got)
- t.Errorf(" want %+v", want)
- }
-
- got.Region = String("other")
- if got.Region == want.Region {
-		t.Errorf("Expect setting copy values to not reflect in source")
- }
-}
-
-func TestCopyReturnsNewInstance(t *testing.T) {
- want := copyTestConfig
- got := copyTestConfig.Copy()
- if got == &want {
- t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
- }
-}
-
-var mergeTestZeroValueConfig = Config{}
-
-var mergeTestConfig = Config{
- Credentials: testCredentials,
- Endpoint: String("MergeTestEndpoint"),
- Region: String("MERGE_TEST_AWS_REGION"),
- DisableSSL: Bool(true),
- HTTPClient: http.DefaultClient,
- LogLevel: LogLevel(LogDebug),
- Logger: NewDefaultLogger(),
- MaxRetries: Int(10),
- DisableParamValidation: Bool(true),
- DisableComputeChecksums: Bool(true),
- S3ForcePathStyle: Bool(true),
-}
-
-var mergeTests = []struct {
- cfg *Config
- in *Config
- want *Config
-}{
- {&Config{}, nil, &Config{}},
- {&Config{}, &mergeTestZeroValueConfig, &Config{}},
- {&Config{}, &mergeTestConfig, &mergeTestConfig},
-}
-
-func TestMerge(t *testing.T) {
- for i, tt := range mergeTests {
- got := tt.cfg.Copy()
- got.MergeIn(tt.in)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("Config %d %+v", i, tt.cfg)
- t.Errorf(" Merge(%+v)", tt.in)
- t.Errorf(" got %+v", got)
- t.Errorf(" want %+v", tt.want)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
deleted file mode 100644
index 3b73a7d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package aws
-
-import "time"
-
-// String returns a pointer to the string value passed in.
-func String(v string) *string {
- return &v
-}
-
-// StringValue returns the value of the string pointer passed in or
-// "" if the pointer is nil.
-func StringValue(v *string) string {
- if v != nil {
- return *v
- }
- return ""
-}
-
-// StringSlice converts a slice of string values into a slice of
-// string pointers
-func StringSlice(src []string) []*string {
- dst := make([]*string, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// StringValueSlice converts a slice of string pointers into a slice of
-// string values
-func StringValueSlice(src []*string) []string {
- dst := make([]string, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// StringMap converts a string map of string values into a string
-// map of string pointers
-func StringMap(src map[string]string) map[string]*string {
- dst := make(map[string]*string)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// StringValueMap converts a string map of string pointers into a string
-// map of string values
-func StringValueMap(src map[string]*string) map[string]string {
- dst := make(map[string]string)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Bool returns a pointer to the bool value passed in.
-func Bool(v bool) *bool {
- return &v
-}
-
-// BoolValue returns the value of the bool pointer passed in or
-// false if the pointer is nil.
-func BoolValue(v *bool) bool {
- if v != nil {
- return *v
- }
- return false
-}
-
-// BoolSlice converts a slice of bool values into a slice of
-// bool pointers
-func BoolSlice(src []bool) []*bool {
- dst := make([]*bool, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// BoolValueSlice converts a slice of bool pointers into a slice of
-// bool values
-func BoolValueSlice(src []*bool) []bool {
- dst := make([]bool, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// BoolMap converts a string map of bool values into a string
-// map of bool pointers
-func BoolMap(src map[string]bool) map[string]*bool {
- dst := make(map[string]*bool)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// BoolValueMap converts a string map of bool pointers into a string
-// map of bool values
-func BoolValueMap(src map[string]*bool) map[string]bool {
- dst := make(map[string]bool)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int returns a pointer to the int value passed in.
-func Int(v int) *int {
- return &v
-}
-
-// IntValue returns the value of the int pointer passed in or
-// 0 if the pointer is nil.
-func IntValue(v *int) int {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// IntSlice converts a slice of int values into a slice of
-// int pointers
-func IntSlice(src []int) []*int {
- dst := make([]*int, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// IntValueSlice converts a slice of int pointers into a slice of
-// int values
-func IntValueSlice(src []*int) []int {
- dst := make([]int, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// IntMap converts a string map of int values into a string
-// map of int pointers
-func IntMap(src map[string]int) map[string]*int {
- dst := make(map[string]*int)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// IntValueMap converts a string map of int pointers into a string
-// map of int values
-func IntValueMap(src map[string]*int) map[string]int {
- dst := make(map[string]int)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int64 returns a pointer to the int64 value passed in.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Int64Value returns the value of the int64 pointer passed in or
-// 0 if the pointer is nil.
-func Int64Value(v *int64) int64 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Int64Slice converts a slice of int64 values into a slice of
-// int64 pointers
-func Int64Slice(src []int64) []*int64 {
- dst := make([]*int64, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Int64ValueSlice converts a slice of int64 pointers into a slice of
-// int64 values
-func Int64ValueSlice(src []*int64) []int64 {
- dst := make([]int64, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Int64Map converts a string map of int64 values into a string
-// map of int64 pointers
-func Int64Map(src map[string]int64) map[string]*int64 {
- dst := make(map[string]*int64)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Int64ValueMap converts a string map of int64 pointers into a string
-// map of int64 values
-func Int64ValueMap(src map[string]*int64) map[string]int64 {
- dst := make(map[string]int64)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Float64 returns a pointer to the float64 value passed in.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Float64Value returns the value of the float64 pointer passed in or
-// 0 if the pointer is nil.
-func Float64Value(v *float64) float64 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Float64Slice converts a slice of float64 values into a slice of
-// float64 pointers
-func Float64Slice(src []float64) []*float64 {
- dst := make([]*float64, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Float64ValueSlice converts a slice of float64 pointers into a slice of
-// float64 values
-func Float64ValueSlice(src []*float64) []float64 {
- dst := make([]float64, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Float64Map converts a string map of float64 values into a string
-// map of float64 pointers
-func Float64Map(src map[string]float64) map[string]*float64 {
- dst := make(map[string]*float64)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Float64ValueMap converts a string map of float64 pointers into a string
-// map of float64 values
-func Float64ValueMap(src map[string]*float64) map[string]float64 {
- dst := make(map[string]float64)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Time returns a pointer to the time.Time value passed in.
-func Time(v time.Time) *time.Time {
- return &v
-}
-
-// TimeValue returns the value of the time.Time pointer passed in or
-// time.Time{} if the pointer is nil.
-func TimeValue(v *time.Time) time.Time {
- if v != nil {
- return *v
- }
- return time.Time{}
-}
-
-// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
-// The result is undefined if the Unix time cannot be represented by an int64.
-// This includes calling TimeUnixMilli on a zero Time, which is also undefined.
-//
-// This utility is useful for service APIs such as CloudWatch Logs which require
-// their unix time values to be in milliseconds.
-//
-// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
-func TimeUnixMilli(t time.Time) int64 {
- return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
-}
-
-// TimeSlice converts a slice of time.Time values into a slice of
-// time.Time pointers
-func TimeSlice(src []time.Time) []*time.Time {
- dst := make([]*time.Time, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// TimeValueSlice converts a slice of time.Time pointers into a slice of
-// time.Time values
-func TimeValueSlice(src []*time.Time) []time.Time {
- dst := make([]time.Time, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// TimeMap converts a string map of time.Time values into a string
-// map of time.Time pointers
-func TimeMap(src map[string]time.Time) map[string]*time.Time {
- dst := make(map[string]*time.Time)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// TimeValueMap converts a string map of time.Time pointers into a string
-// map of time.Time values
-func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
- dst := make(map[string]time.Time)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
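Nearly every field in SDK request and config structs is a pointer, which is why the helpers above exist in both directions: the value-returning variants nil-check on the way back. A tiny hedged round-trip:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Wrap plain values as pointers for request/config structs...
	body := aws.String("hello")
	count := aws.Int64(10)

	// ...and unwrap them safely; nil pointers come back as zero values.
	fmt.Println(aws.StringValue(body), aws.Int64Value(count))    // hello 10
	fmt.Println(aws.StringValue(nil) == "", aws.Int64Value(nil)) // true 0
}
```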
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go
deleted file mode 100644
index df7a3e5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package aws
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-var testCasesStringSlice = [][]string{
- {"a", "b", "c", "d", "e"},
- {"a", "b", "", "", "e"},
-}
-
-func TestStringSlice(t *testing.T) {
- for idx, in := range testCasesStringSlice {
- if in == nil {
- continue
- }
- out := StringSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := StringValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesStringValueSlice = [][]*string{
- {String("a"), String("b"), nil, String("c")},
-}
-
-func TestStringValueSlice(t *testing.T) {
- for idx, in := range testCasesStringValueSlice {
- if in == nil {
- continue
- }
- out := StringValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := StringSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesStringMap = []map[string]string{
- {"a": "1", "b": "2", "c": "3"},
-}
-
-func TestStringMap(t *testing.T) {
- for idx, in := range testCasesStringMap {
- if in == nil {
- continue
- }
- out := StringMap(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := StringValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesBoolSlice = [][]bool{
- {true, true, false, false},
-}
-
-func TestBoolSlice(t *testing.T) {
- for idx, in := range testCasesBoolSlice {
- if in == nil {
- continue
- }
- out := BoolSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := BoolValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesBoolValueSlice = [][]*bool{}
-
-func TestBoolValueSlice(t *testing.T) {
- for idx, in := range testCasesBoolValueSlice {
- if in == nil {
- continue
- }
- out := BoolValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := BoolSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesBoolMap = []map[string]bool{
- {"a": true, "b": false, "c": true},
-}
-
-func TestBoolMap(t *testing.T) {
- for idx, in := range testCasesBoolMap {
- if in == nil {
- continue
- }
- out := BoolMap(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := BoolValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesIntSlice = [][]int{
- {1, 2, 3, 4},
-}
-
-func TestIntSlice(t *testing.T) {
- for idx, in := range testCasesIntSlice {
- if in == nil {
- continue
- }
- out := IntSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := IntValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesIntValueSlice = [][]*int{}
-
-func TestIntValueSlice(t *testing.T) {
- for idx, in := range testCasesIntValueSlice {
- if in == nil {
- continue
- }
- out := IntValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := IntSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesIntMap = []map[string]int{
- {"a": 3, "b": 2, "c": 1},
-}
-
-func TestIntMap(t *testing.T) {
- for idx, in := range testCasesIntMap {
- if in == nil {
- continue
- }
- out := IntMap(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := IntValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesInt64Slice = [][]int64{
- {1, 2, 3, 4},
-}
-
-func TestInt64Slice(t *testing.T) {
- for idx, in := range testCasesInt64Slice {
- if in == nil {
- continue
- }
- out := Int64Slice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := Int64ValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesInt64ValueSlice = [][]*int64{}
-
-func TestInt64ValueSlice(t *testing.T) {
- for idx, in := range testCasesInt64ValueSlice {
- if in == nil {
- continue
- }
- out := Int64ValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := Int64Slice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesInt64Map = []map[string]int64{
- {"a": 3, "b": 2, "c": 1},
-}
-
-func TestInt64Map(t *testing.T) {
- for idx, in := range testCasesInt64Map {
- if in == nil {
- continue
- }
- out := Int64Map(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := Int64ValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesFloat64Slice = [][]float64{
- {1, 2, 3, 4},
-}
-
-func TestFloat64Slice(t *testing.T) {
- for idx, in := range testCasesFloat64Slice {
- if in == nil {
- continue
- }
- out := Float64Slice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := Float64ValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesFloat64ValueSlice = [][]*float64{}
-
-func TestFloat64ValueSlice(t *testing.T) {
- for idx, in := range testCasesFloat64ValueSlice {
- if in == nil {
- continue
- }
- out := Float64ValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := Float64Slice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesFloat64Map = []map[string]float64{
- {"a": 3, "b": 2, "c": 1},
-}
-
-func TestFloat64Map(t *testing.T) {
- for idx, in := range testCasesFloat64Map {
- if in == nil {
- continue
- }
- out := Float64Map(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := Float64ValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesTimeSlice = [][]time.Time{
- {time.Now(), time.Now().AddDate(100, 0, 0)},
-}
-
-func TestTimeSlice(t *testing.T) {
- for idx, in := range testCasesTimeSlice {
- if in == nil {
- continue
- }
- out := TimeSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := TimeValueSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
-
-var testCasesTimeValueSlice = [][]*time.Time{}
-
-func TestTimeValueSlice(t *testing.T) {
- for idx, in := range testCasesTimeValueSlice {
- if in == nil {
- continue
- }
- out := TimeValueSlice(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- if in[i] == nil {
- assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
- }
- }
-
- out2 := TimeSlice(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- for i := range out2 {
- if in[i] == nil {
- assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
- } else {
- assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
- }
- }
- }
-}
-
-var testCasesTimeMap = []map[string]time.Time{
- {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
-}
-
-func TestTimeMap(t *testing.T) {
- for idx, in := range testCasesTimeMap {
- if in == nil {
- continue
- }
- out := TimeMap(in)
- assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
- for i := range out {
- assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
- }
-
- out2 := TimeValueMap(out)
- assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
- assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
deleted file mode 100644
index 8e12f82..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package corehandlers
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "regexp"
- "runtime"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// Interface for matching types which also have a Len method.
-type lener interface {
- Len() int
-}
-
-// BuildContentLengthHandler builds the content length of a request based on the body,
-// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
-// to determine request body length and no "Content-Length" was specified it will panic.
-//
-// The Content-Length will only be added to the request if the length of the body
-// is greater than 0. If the body is empty or the current `Content-Length`
-// header is <= 0, the header will also be stripped.
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
- var length int64
-
- if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
- length, _ = strconv.ParseInt(slength, 10, 64)
- } else {
- switch body := r.Body.(type) {
- case nil:
- length = 0
- case lener:
- length = int64(body.Len())
- case io.Seeker:
- r.BodyStart, _ = body.Seek(0, 1)
- end, _ := body.Seek(0, 2)
- body.Seek(r.BodyStart, 0) // make sure to seek back to original location
- length = end - r.BodyStart
- default:
- panic("Cannot get length of body, must provide `ContentLength`")
- }
- }
-
- if length > 0 {
- r.HTTPRequest.ContentLength = length
- r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
- } else {
- r.HTTPRequest.ContentLength = 0
- r.HTTPRequest.Header.Del("Content-Length")
- }
-}}
-
-// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
-var SDKVersionUserAgentHandler = request.NamedHandler{
- Name: "core.SDKVersionUserAgentHandler",
- Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
- runtime.Version(), runtime.GOOS, runtime.GOARCH),
-}
-
-var reStatusCode = regexp.MustCompile(`^(\d{3})`)
-
-// ValidateReqSigHandler is a request handler to ensure that the request's
-// signature doesn't expire before it is sent. This can happen when a request
-// is built and signed significantly before it is sent, or when significant
-// delays occur while retrying requests, causing the signature to expire.
-var ValidateReqSigHandler = request.NamedHandler{
- Name: "core.ValidateReqSigHandler",
- Fn: func(r *request.Request) {
-		// Requests using anonymous credentials are never signed, so there is nothing to re-sign
- if r.Config.Credentials == credentials.AnonymousCredentials {
- return
- }
-
- signedTime := r.Time
- if !r.LastSignedAt.IsZero() {
- signedTime = r.LastSignedAt
- }
-
- // 10 minutes to allow for some clock skew/delays in transmission.
- // Would be improved with aws/aws-sdk-go#423
- if signedTime.Add(10 * time.Minute).After(time.Now()) {
- return
- }
-
- fmt.Println("request expired, resigning")
- r.Sign()
- },
-}
-
-// SendHandler is a request handler to send service request using HTTP client.
-var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
- var err error
- r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
- if err != nil {
- // Prevent leaking if an HTTPResponse was returned. Clean up
- // the body.
- if r.HTTPResponse != nil {
- r.HTTPResponse.Body.Close()
- }
-		// Capture the case where url.Error is returned while processing the
-		// response, e.g. a 301 without a Location header comes back as a string
-		// error and r.HTTPResponse is nil. Other URL redirect errors will
-		// come back in a similar manner.
- if e, ok := err.(*url.Error); ok && e.Err != nil {
- if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
- code, _ := strconv.ParseInt(s[1], 10, 64)
- r.HTTPResponse = &http.Response{
- StatusCode: int(code),
- Status: http.StatusText(int(code)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- return
- }
- }
- if r.HTTPResponse == nil {
- // Add a dummy request response object to ensure the HTTPResponse
- // value is consistent.
- r.HTTPResponse = &http.Response{
- StatusCode: int(0),
- Status: http.StatusText(int(0)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- }
- // Catch all other request errors.
- r.Error = awserr.New("RequestError", "send request failed", err)
- r.Retryable = aws.Bool(true) // network errors are retryable
- }
-}}
-
-// ValidateResponseHandler is a request handler to validate service response.
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
- if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
- // this may be replaced by an UnmarshalError handler
- r.Error = awserr.New("UnknownError", "unknown error", nil)
- }
-}}
-
-// AfterRetryHandler performs final checks to determine if the request should
-// be retried and how long to delay.
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
- // If one of the other handlers already set the retry state
- // we don't want to override it based on the service's state
- if r.Retryable == nil {
- r.Retryable = aws.Bool(r.ShouldRetry(r))
- }
-
- if r.WillRetry() {
- r.RetryDelay = r.RetryRules(r)
- r.Config.SleepDelay(r.RetryDelay)
-
- // when the expired token exception occurs the credentials
- // need to be expired locally so that the next request to
- // get credentials will trigger a credentials refresh.
- if r.IsErrorExpired() {
- r.Config.Credentials.Expire()
- }
-
- r.RetryCount++
- r.Error = nil
- }
-}}
-
-// ValidateEndpointHandler is a request handler to validate a request had the
-// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
-// region is not valid.
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
- if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
- r.Error = aws.ErrMissingRegion
- } else if r.ClientInfo.Endpoint == "" {
- r.Error = aws.ErrMissingEndpoint
- }
-}}
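The handlers removed above run as named phases on every SDK request, and custom handlers can be attached alongside them. A minimal sketch of hooking the Send phase, assuming a standard session built from the upstream (non-vendored) SDK:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Log every outgoing request just before core.SendHandler executes it.
	// Handlers registered on the session are copied into every client
	// created from it.
	sess.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s %s", r.HTTPRequest.Method, r.HTTPRequest.URL)
	})
}
```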
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
deleted file mode 100644
index ec9d78f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package corehandlers_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-func TestValidateEndpointHandler(t *testing.T) {
- os.Clearenv()
-
- svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
- svc.Handlers.Clear()
- svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
-
- req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
- err := req.Build()
-
- assert.NoError(t, err)
-}
-
-func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
- os.Clearenv()
-
- svc := awstesting.NewClient()
- svc.Handlers.Clear()
- svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
-
- req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
- err := req.Build()
-
- assert.Error(t, err)
- assert.Equal(t, aws.ErrMissingRegion, err)
-}
-
-type mockCredsProvider struct {
- expired bool
- retrieveCalled bool
-}
-
-func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
- m.retrieveCalled = true
- return credentials.Value{ProviderName: "mockCredsProvider"}, nil
-}
-
-func (m *mockCredsProvider) IsExpired() bool {
- return m.expired
-}
-
-func TestAfterRetryRefreshCreds(t *testing.T) {
- os.Clearenv()
- credProvider := &mockCredsProvider{}
-
- svc := awstesting.NewClient(&aws.Config{
- Credentials: credentials.NewCredentials(credProvider),
- MaxRetries: aws.Int(1),
- })
-
- svc.Handlers.Clear()
- svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) {
- r.Error = awserr.New("UnknownError", "", nil)
- r.HTTPResponse = &http.Response{StatusCode: 400, Body: ioutil.NopCloser(bytes.NewBuffer([]byte{}))}
- })
- svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
- r.Error = awserr.New("ExpiredTokenException", "", nil)
- })
- svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
-
- assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
- assert.False(t, credProvider.retrieveCalled)
-
- req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
- req.Send()
-
- assert.True(t, svc.Config.Credentials.IsExpired())
- assert.False(t, credProvider.retrieveCalled)
-
- _, err := svc.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.True(t, credProvider.retrieveCalled)
-}
-
-type testSendHandlerTransport struct{}
-
-func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
- return nil, fmt.Errorf("mock error")
-}
-
-func TestSendHandlerError(t *testing.T) {
- svc := awstesting.NewClient(&aws.Config{
- HTTPClient: &http.Client{
- Transport: &testSendHandlerTransport{},
- },
- })
- svc.Handlers.Clear()
- svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
- r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
-
- r.Send()
-
- assert.Error(t, r.Error)
- assert.NotNil(t, r.HTTPResponse)
-}
-
-func TestValidateReqSigHandler(t *testing.T) {
- cases := []struct {
- Req *request.Request
- Resign bool
- }{
- {
- Req: &request.Request{
- Config: aws.Config{Credentials: credentials.AnonymousCredentials},
- Time: time.Now().Add(-15 * time.Minute),
- },
- Resign: false,
- },
- {
- Req: &request.Request{
- Time: time.Now().Add(-15 * time.Minute),
- },
- Resign: true,
- },
- {
- Req: &request.Request{
- Time: time.Now().Add(-1 * time.Minute),
- },
- Resign: false,
- },
- }
-
- for i, c := range cases {
- resigned := false
- c.Req.Handlers.Sign.PushBack(func(r *request.Request) {
- resigned = true
- })
-
- corehandlers.ValidateReqSigHandler.Fn(c.Req)
-
- assert.NoError(t, c.Req.Error, "%d, expect no error", i)
- assert.Equal(t, c.Resign, resigned, "%d, expected resigning to match", i)
- }
-}
-
-func setupContentLengthTestServer(t *testing.T, hasContentLength bool, contentLength int64) *httptest.Server {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- _, ok := r.Header["Content-Length"]
- assert.Equal(t, hasContentLength, ok, "expect content length to be set, %t", hasContentLength)
- assert.Equal(t, contentLength, r.ContentLength)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- r.Body.Close()
-
- authHeader := r.Header.Get("Authorization")
- if hasContentLength {
- assert.Contains(t, authHeader, "content-length")
- } else {
- assert.NotContains(t, authHeader, "content-length")
- }
-
- assert.Equal(t, contentLength, int64(len(b)))
- }))
-
- return server
-}
-
-func TestBuildContentLength_ZeroBody(t *testing.T) {
- server := setupContentLengthTestServer(t, false, 0)
-
- svc := s3.New(unit.Session, &aws.Config{
- Endpoint: aws.String(server.URL),
- S3ForcePathStyle: aws.Bool(true),
- DisableSSL: aws.Bool(true),
- })
- _, err := svc.GetObject(&s3.GetObjectInput{
- Bucket: aws.String("bucketname"),
- Key: aws.String("keyname"),
- })
-
- assert.NoError(t, err)
-}
-
-func TestBuildContentLength_NegativeBody(t *testing.T) {
- server := setupContentLengthTestServer(t, false, 0)
-
- svc := s3.New(unit.Session, &aws.Config{
- Endpoint: aws.String(server.URL),
- S3ForcePathStyle: aws.Bool(true),
- DisableSSL: aws.Bool(true),
- })
- req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
- Bucket: aws.String("bucketname"),
- Key: aws.String("keyname"),
- })
-
- req.HTTPRequest.Header.Set("Content-Length", "-1")
-
- assert.NoError(t, req.Send())
-}
-
-func TestBuildContentLength_WithBody(t *testing.T) {
- server := setupContentLengthTestServer(t, true, 1024)
-
- svc := s3.New(unit.Session, &aws.Config{
- Endpoint: aws.String(server.URL),
- S3ForcePathStyle: aws.Bool(true),
- DisableSSL: aws.Bool(true),
- })
- _, err := svc.PutObject(&s3.PutObjectInput{
- Bucket: aws.String("bucketname"),
- Key: aws.String("keyname"),
- Body: bytes.NewReader(make([]byte, 1024)),
- })
-
- assert.NoError(t, err)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
deleted file mode 100644
index 7d50b15..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package corehandlers
-
-import "github.com/aws/aws-sdk-go/aws/request"
-
-// ValidateParametersHandler is a request handler to validate the input parameters.
-// Validating parameters only has meaning if done prior to the request being sent.
-var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
- if !r.ParamsFilled() {
- return
- }
-
- if v, ok := r.Params.(request.Validator); ok {
- if err := v.Validate(); err != nil {
- r.Error = err
- }
- }
-}}
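The validator above only runs for inputs that implement request.Validator. A small sketch of such a shape, assuming the upstream request package; putInput is a hypothetical type used purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// putInput is a hypothetical request shape. Because it implements
// request.Validator, core.ValidateParametersHandler rejects the request
// before it is ever sent.
type putInput struct {
	Name *string
}

func (p *putInput) Validate() error {
	invalid := request.ErrInvalidParams{Context: "putInput"}
	if p.Name == nil {
		invalid.Add(request.NewErrParamRequired("Name"))
	}
	if invalid.Len() > 0 {
		return invalid
	}
	return nil
}

func main() {
	var in putInput
	fmt.Println(in.Validate()) // prints the missing-parameter error
}
```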
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
deleted file mode 100644
index 66973ca..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package corehandlers_test
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/aws/aws-sdk-go/service/kinesis"
- "github.com/stretchr/testify/require"
-)
-
-var testSvc = func() *client.Client {
- s := &client.Client{
- Config: aws.Config{},
- ClientInfo: metadata.ClientInfo{
- ServiceName: "mock-service",
- APIVersion: "2015-01-01",
- },
- }
- return s
-}()
-
-type StructShape struct {
- _ struct{} `type:"structure"`
-
- RequiredList []*ConditionalStructShape `required:"true"`
- RequiredMap map[string]*ConditionalStructShape `required:"true"`
- RequiredBool *bool `required:"true"`
- OptionalStruct *ConditionalStructShape
-
- hiddenParameter *string
-}
-
-func (s *StructShape) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "StructShape"}
- if s.RequiredList == nil {
- invalidParams.Add(request.NewErrParamRequired("RequiredList"))
- }
- if s.RequiredMap == nil {
- invalidParams.Add(request.NewErrParamRequired("RequiredMap"))
- }
- if s.RequiredBool == nil {
- invalidParams.Add(request.NewErrParamRequired("RequiredBool"))
- }
- if s.RequiredList != nil {
- for i, v := range s.RequiredList {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequiredList", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.RequiredMap != nil {
- for i, v := range s.RequiredMap {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequiredMap", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.OptionalStruct != nil {
- if err := s.OptionalStruct.Validate(); err != nil {
- invalidParams.AddNested("OptionalStruct", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type ConditionalStructShape struct {
- _ struct{} `type:"structure"`
-
- Name *string `required:"true"`
-}
-
-func (s *ConditionalStructShape) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ConditionalStructShape"}
- if s.Name == nil {
- invalidParams.Add(request.NewErrParamRequired("Name"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-func TestNoErrors(t *testing.T) {
- input := &StructShape{
- RequiredList: []*ConditionalStructShape{},
- RequiredMap: map[string]*ConditionalStructShape{
- "key1": {Name: aws.String("Name")},
- "key2": {Name: aws.String("Name")},
- },
- RequiredBool: aws.Bool(true),
- OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
- }
-
- req := testSvc.NewRequest(&request.Operation{}, input, nil)
- corehandlers.ValidateParametersHandler.Fn(req)
- require.NoError(t, req.Error)
-}
-
-func TestMissingRequiredParameters(t *testing.T) {
- input := &StructShape{}
- req := testSvc.NewRequest(&request.Operation{}, input, nil)
- corehandlers.ValidateParametersHandler.Fn(req)
-
- require.Error(t, req.Error)
- assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
- assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message())
-
- errs := req.Error.(awserr.BatchedErrors).OrigErrs()
- assert.Len(t, errs, 3)
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error())
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error())
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error())
-
- assert.Equal(t, "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error())
-}
-
-func TestNestedMissingRequiredParameters(t *testing.T) {
- input := &StructShape{
- RequiredList: []*ConditionalStructShape{{}},
- RequiredMap: map[string]*ConditionalStructShape{
- "key1": {Name: aws.String("Name")},
- "key2": {},
- },
- RequiredBool: aws.Bool(true),
- OptionalStruct: &ConditionalStructShape{},
- }
-
- req := testSvc.NewRequest(&request.Operation{}, input, nil)
- corehandlers.ValidateParametersHandler.Fn(req)
-
- require.Error(t, req.Error)
- assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
- assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message())
-
- errs := req.Error.(awserr.BatchedErrors).OrigErrs()
- assert.Len(t, errs, 3)
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error())
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error())
- assert.Equal(t, "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error())
-}
-
-type testInput struct {
- StringField *string `min:"5"`
- ListField []string `min:"3"`
- MapField map[string]string `min:"4"`
-}
-
-func (s testInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "testInput"}
- if s.StringField != nil && len(*s.StringField) < 5 {
- invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
- }
- if s.ListField != nil && len(s.ListField) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("ListField", 3))
- }
- if s.MapField != nil && len(s.MapField) < 4 {
- invalidParams.Add(request.NewErrParamMinLen("MapField", 4))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-var testsFieldMin = []struct {
- err awserr.Error
- in testInput
-}{
- {
- err: func() awserr.Error {
- invalidParams := request.ErrInvalidParams{Context: "testInput"}
- invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
- return invalidParams
- }(),
- in: testInput{StringField: aws.String("abcd")},
- },
- {
- err: func() awserr.Error {
- invalidParams := request.ErrInvalidParams{Context: "testInput"}
- invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
- invalidParams.Add(request.NewErrParamMinLen("ListField", 3))
- return invalidParams
- }(),
- in: testInput{StringField: aws.String("abcd"), ListField: []string{"a", "b"}},
- },
- {
- err: func() awserr.Error {
- invalidParams := request.ErrInvalidParams{Context: "testInput"}
- invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
- invalidParams.Add(request.NewErrParamMinLen("ListField", 3))
- invalidParams.Add(request.NewErrParamMinLen("MapField", 4))
- return invalidParams
- }(),
- in: testInput{StringField: aws.String("abcd"), ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}},
- },
- {
- err: nil,
- in: testInput{StringField: aws.String("abcde"),
- ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}},
- },
-}
-
-func TestValidateFieldMinParameter(t *testing.T) {
- for i, c := range testsFieldMin {
- req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
- corehandlers.ValidateParametersHandler.Fn(req)
-
- assert.Equal(t, c.err, req.Error, "%d case failed", i)
- }
-}
-
-func BenchmarkValidateAny(b *testing.B) {
- input := &kinesis.PutRecordsInput{
- StreamName: aws.String("stream"),
- }
- for i := 0; i < 100; i++ {
- record := &kinesis.PutRecordsRequestEntry{
- Data: make([]byte, 10000),
- PartitionKey: aws.String("partition"),
- }
- input.Records = append(input.Records, record)
- }
-
- req, _ := kinesis.New(unit.Session).PutRecordsRequest(input)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- corehandlers.ValidateParametersHandler.Fn(req)
- if err := req.Error; err != nil {
- b.Fatalf("validation failed: %v", err)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
deleted file mode 100644
index 6efc77b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package credentials
-
-import (
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-var (
-	// ErrNoValidProvidersFoundInChain is returned when there are no valid
- // providers in the ChainProvider.
- //
- // This has been deprecated. For verbose error messaging set
- // aws.Config.CredentialsChainVerboseErrors to true
- //
- // @readonly
- ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
- `no valid providers in chain. Deprecated.
- For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
- nil)
-)
-
-// A ChainProvider will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The ChainProvider provides a way of chaining multiple providers together
-// which will pick the first available using priority order of the Providers
-// in the list.
-//
-// If none of the Providers retrieve a valid credentials Value, ChainProvider's
-// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
-//
-// If a Provider is found which returns a valid credentials Value, ChainProvider
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again.
-//
-// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
-// In this example EnvProvider will first check if any credentials are available
-// via the environment variables. If there are none ChainProvider will check
-// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
-// does not return any credentials ChainProvider will return the error
-// ErrNoValidProvidersFoundInChain
-//
-// creds := NewChainCredentials(
-// []Provider{
-// &EnvProvider{},
-// &EC2RoleProvider{
-// Client: ec2metadata.New(sess),
-// },
-// })
-//
-// // Usage of ChainCredentials with aws.Config
-// svc := ec2.New(&aws.Config{Credentials: creds})
-//
-type ChainProvider struct {
- Providers []Provider
- curr Provider
- VerboseErrors bool
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
- return NewCredentials(&ChainProvider{
- Providers: append([]Provider{}, providers...),
- })
-}
-
-// Retrieve returns the credentials value or error if no provider returned
-// without error.
-//
-// If a provider is found it will be cached and any calls to IsExpired()
-// will return the expired state of the cached provider.
-func (c *ChainProvider) Retrieve() (Value, error) {
- var errs []error
- for _, p := range c.Providers {
- creds, err := p.Retrieve()
- if err == nil {
- c.curr = p
- return creds, nil
- }
- errs = append(errs, err)
- }
- c.curr = nil
-
- var err error
- err = ErrNoValidProvidersFoundInChain
- if c.VerboseErrors {
- err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
- }
- return Value{}, err
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *ChainProvider) IsExpired() bool {
- if c.curr != nil {
- return c.curr.IsExpired()
- }
-
- return true
-}
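As the removed doc comment describes, ChainProvider tries each provider in order and caches the first one that succeeds. A minimal sketch of wiring a chain of environment and EC2-role credentials into an SQS client, assuming the upstream SDK packages are available:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession())

	// Try static environment credentials first, then fall back to the
	// EC2 instance role; the first provider that succeeds is cached.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
	})

	svc := sqs.New(sess, &aws.Config{Credentials: creds})
	_ = svc // use svc to receive/delete messages as usual
}
```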
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
deleted file mode 100644
index 3b393a2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package credentials
-
-import (
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/stretchr/testify/assert"
-)
-
-type secondStubProvider struct {
- creds Value
- expired bool
- err error
-}
-
-func (s *secondStubProvider) Retrieve() (Value, error) {
- s.expired = false
- s.creds.ProviderName = "secondStubProvider"
- return s.creds, s.err
-}
-func (s *secondStubProvider) IsExpired() bool {
- return s.expired
-}
-
-func TestChainProviderWithNames(t *testing.T) {
- p := &ChainProvider{
- Providers: []Provider{
- &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
- &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
- &secondStubProvider{
- creds: Value{
- AccessKeyID: "AKIF",
- SecretAccessKey: "NOSECRET",
- SessionToken: "",
- },
- },
- &stubProvider{
- creds: Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "",
- },
- },
- },
- }
-
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
- assert.Equal(t, "secondStubProvider", creds.ProviderName, "Expect provider name to match")
-
- // Also check credentials
- assert.Equal(t, "AKIF", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "NOSECRET", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
-
-}
-
-func TestChainProviderGet(t *testing.T) {
- p := &ChainProvider{
- Providers: []Provider{
- &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
- &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
- &stubProvider{
- creds: Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "",
- },
- },
- },
- }
-
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
- assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
-}
-
-func TestChainProviderIsExpired(t *testing.T) {
- stubProvider := &stubProvider{expired: true}
- p := &ChainProvider{
- Providers: []Provider{
- stubProvider,
- },
- }
-
- assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
- _, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
- assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
-
- stubProvider.expired = true
- assert.True(t, p.IsExpired(), "Expect return of expired provider")
-
- _, err = p.Retrieve()
- assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
-}
-
-func TestChainProviderWithNoProvider(t *testing.T) {
- p := &ChainProvider{
- Providers: []Provider{},
- }
-
- assert.True(t, p.IsExpired(), "Expect expired with no providers")
- _, err := p.Retrieve()
- assert.Equal(t,
- ErrNoValidProvidersFoundInChain,
- err,
- "Expect no providers error returned")
-}
-
-func TestChainProviderWithNoValidProvider(t *testing.T) {
- errs := []error{
- awserr.New("FirstError", "first provider error", nil),
- awserr.New("SecondError", "second provider error", nil),
- }
- p := &ChainProvider{
- Providers: []Provider{
- &stubProvider{err: errs[0]},
- &stubProvider{err: errs[1]},
- },
- }
-
- assert.True(t, p.IsExpired(), "Expect expired with no providers")
- _, err := p.Retrieve()
-
- assert.Equal(t,
- ErrNoValidProvidersFoundInChain,
- err,
- "Expect no providers error returned")
-}
-
-func TestChainProviderWithNoValidProviderWithVerboseEnabled(t *testing.T) {
- errs := []error{
- awserr.New("FirstError", "first provider error", nil),
- awserr.New("SecondError", "second provider error", nil),
- }
- p := &ChainProvider{
- VerboseErrors: true,
- Providers: []Provider{
- &stubProvider{err: errs[0]},
- &stubProvider{err: errs[1]},
- },
- }
-
- assert.True(t, p.IsExpired(), "Expect expired with no providers")
- _, err := p.Retrieve()
-
- assert.Equal(t,
- awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs),
- err,
- "Expect no providers error returned")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
deleted file mode 100644
index 7b8ebf5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Package credentials provides credential retrieval and management
-//
-// The Credentials is the primary method of getting access to and managing
-// credentials Values. Using dependency injection, retrieval of the credential
-// values is handled by an object which satisfies the Provider interface.
-//
-// By default the Credentials.Get() will cache the successful result of a
-// Provider's Retrieve() until Provider.IsExpired() returns true. At which
-// point Credentials will call Provider's Retrieve() to get new credential Value.
-//
-// The Provider is responsible for determining when credentials Value have expired.
-// It is also important to note that Credentials will always call Retrieve the
-// first time Credentials.Get() is called.
-//
-// Example of using the environment variable credentials.
-//
-// creds := NewEnvCredentials()
-//
-// // Retrieve the credentials value
-// credValue, err := creds.Get()
-// if err != nil {
-// // handle error
-// }
-//
-// Example of forcing credentials to expire and be refreshed on the next Get().
-// This may be helpful to proactively expire credentials and refresh them sooner
-// than they would naturally expire on their own.
-//
-// creds := NewCredentials(&EC2RoleProvider{})
-// creds.Expire()
-// credsValue, err := creds.Get()
-// // New credentials will be retrieved instead of from cache.
-//
-//
-// Custom Provider
-//
-// Each Provider built into this package also provides a helper method to generate
-// a Credentials pointer setup with the provider. To use a custom Provider just
-// create a type which satisfies the Provider interface and pass it to the
-// NewCredentials method.
-//
-// type MyProvider struct{}
-// func (m *MyProvider) Retrieve() (Value, error) {...}
-// func (m *MyProvider) IsExpired() bool {...}
-//
-// creds := NewCredentials(&MyProvider{})
-// credValue, err := creds.Get()
-//
-package credentials
-
-import (
- "sync"
- "time"
-)
-
-// AnonymousCredentials is an empty Credential object that can be used as
-// dummy placeholder credentials for requests that do not need to be signed.
-//
-// This Credentials can be used to configure a service to not sign requests
-// when making service API calls. For example, when accessing public
-// s3 buckets.
-//
-// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
-// // Access public S3 buckets.
-//
-// @readonly
-var AnonymousCredentials = NewStaticCredentials("", "", "")
-
-// A Value is the AWS credentials value for individual credential fields.
-type Value struct {
- // AWS Access key ID
- AccessKeyID string
-
- // AWS Secret Access Key
- SecretAccessKey string
-
- // AWS Session Token
- SessionToken string
-
- // Provider used to get credentials
- ProviderName string
-}
-
-// A Provider is the interface for any component which will provide credentials
-// Value. A provider is required to manage its own Expired state, and what
-// being expired means.
-//
-// The Provider should not need to implement its own mutexes, because
-// that will be managed by Credentials.
-type Provider interface {
-	// Retrieve returns a nil error if it successfully retrieved the value.
-	// An error is returned if the value was not obtainable, or empty.
- Retrieve() (Value, error)
-
- // IsExpired returns if the credentials are no longer valid, and need
- // to be retrieved.
- IsExpired() bool
-}
-
-// An Expiry provides shared expiration logic to be used by credentials
-// providers to implement expiry functionality.
-//
-// The best method to use this struct is as an anonymous field within the
-// provider's struct.
-//
-// Example:
-// type EC2RoleProvider struct {
-// Expiry
-// ...
-// }
-type Expiry struct {
- // The date/time when to expire on
- expiration time.Time
-
- // If set will be used by IsExpired to determine the current time.
- // Defaults to time.Now if CurrentTime is not set. Available for testing
- // to be able to mock out the current time.
- CurrentTime func() time.Time
-}
-
-// SetExpiration sets the expiration IsExpired will check when called.
-//
-// If window is greater than 0 the expiration time will be reduced by the
-// window value.
-//
-// Using a window is helpful to trigger credentials to expire sooner than
-// the expiration time given to ensure no requests are made with expired
-// tokens.
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
- e.expiration = expiration
- if window > 0 {
- e.expiration = e.expiration.Add(-window)
- }
-}
-
-// IsExpired returns if the credentials are expired.
-func (e *Expiry) IsExpired() bool {
- if e.CurrentTime == nil {
- e.CurrentTime = time.Now
- }
- return e.expiration.Before(e.CurrentTime())
-}
-
-// A Credentials provides synchronous safe retrieval of AWS credentials Value.
-// Credentials will cache the credentials value until they expire. Once the value
-// expires the next Get will attempt to retrieve valid credentials.
-//
-// Credentials is safe to use across multiple goroutines and will manage the
-// synchronous state so the Providers do not need to implement their own
-// synchronization.
-//
-// The first Credentials.Get() will always call Provider.Retrieve() to get the
-// first instance of the credentials Value. All calls to Get() after that
-// will return the cached credentials Value until IsExpired() returns true.
-type Credentials struct {
- creds Value
- forceRefresh bool
- m sync.Mutex
-
- provider Provider
-}
-
-// NewCredentials returns a pointer to a new Credentials with the provider set.
-func NewCredentials(provider Provider) *Credentials {
- return &Credentials{
- provider: provider,
- forceRefresh: true,
- }
-}
-
-// Get returns the credentials value, or error if the credentials Value failed
-// to be retrieved.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.isExpired() {
- creds, err := c.provider.Retrieve()
- if err != nil {
- return Value{}, err
- }
- c.creds = creds
- c.forceRefresh = false
- }
-
- return c.creds, nil
-}
-
-// Expire expires the credentials and forces them to be retrieved on the
-// next call to Get().
-//
-// This will override the Provider's expired state, and force Credentials
-// to call the Provider's Retrieve().
-func (c *Credentials) Expire() {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.forceRefresh = true
-}
-
-// IsExpired returns if the credentials are no longer valid, and need
-// to be retrieved.
-//
-// If the Credentials were forced to be expired with Expire() this will
-// reflect that override.
-func (c *Credentials) IsExpired() bool {
- c.m.Lock()
- defer c.m.Unlock()
-
- return c.isExpired()
-}
-
-// isExpired helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpired() bool {
- return c.forceRefresh || c.provider.IsExpired()
-}
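The package documentation above sketches a custom Provider; a runnable version of that idea follows. envTokenProvider, MY_AKID, and MY_SECRET are hypothetical names used only for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// envTokenProvider is a toy Provider that reads two hypothetical environment
// variables. It never reports itself as expired, so Credentials caches the
// result of the first Retrieve.
type envTokenProvider struct{}

func (envTokenProvider) Retrieve() (credentials.Value, error) {
	return credentials.Value{
		AccessKeyID:     os.Getenv("MY_AKID"),
		SecretAccessKey: os.Getenv("MY_SECRET"),
		ProviderName:    "envTokenProvider",
	}, nil
}

func (envTokenProvider) IsExpired() bool { return false }

func main() {
	creds := credentials.NewCredentials(envTokenProvider{})
	v, err := creds.Get()
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("loaded credentials from", v.ProviderName)
}
```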
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
deleted file mode 100644
index 7b79ba9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package credentials
-
-import (
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/stretchr/testify/assert"
-)
-
-type stubProvider struct {
- creds Value
- expired bool
- err error
-}
-
-func (s *stubProvider) Retrieve() (Value, error) {
- s.expired = false
- s.creds.ProviderName = "stubProvider"
- return s.creds, s.err
-}
-func (s *stubProvider) IsExpired() bool {
- return s.expired
-}
-
-func TestCredentialsGet(t *testing.T) {
- c := NewCredentials(&stubProvider{
- creds: Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "",
- },
- expired: true,
- })
-
- creds, err := c.Get()
- assert.Nil(t, err, "Expected no error")
- assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
-}
-
-func TestCredentialsGetWithError(t *testing.T) {
- c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})
-
- _, err := c.Get()
- assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
-}
-
-func TestCredentialsExpire(t *testing.T) {
- stub := &stubProvider{}
- c := NewCredentials(stub)
-
- stub.expired = false
- assert.True(t, c.IsExpired(), "Expected to start out expired")
- c.Expire()
- assert.True(t, c.IsExpired(), "Expected to be expired")
-
- c.forceRefresh = false
- assert.False(t, c.IsExpired(), "Expected not to be expired")
-
- stub.expired = true
- assert.True(t, c.IsExpired(), "Expected to be expired")
-}
-
-func TestCredentialsGetWithProviderName(t *testing.T) {
- stub := &stubProvider{}
-
- c := NewCredentials(stub)
-
- creds, err := c.Get()
- assert.Nil(t, err, "Expected no error")
- assert.Equal(t, creds.ProviderName, "stubProvider", "Expected provider name to match")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
deleted file mode 100644
index aa9d689..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package ec2rolecreds
-
-import (
- "bufio"
- "encoding/json"
- "fmt"
- "path"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
-)
-
-// ProviderName provides a name of EC2Role provider
-const ProviderName = "EC2RoleProvider"
-
-// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
-// track of whether those credentials are expired.
-//
-// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
-// or ExpiryWindow
-//
-// p := &ec2rolecreds.EC2RoleProvider{
-// // Pass in a custom timeout to be used when requesting
-// // IAM EC2 Role credentials.
-// Client: ec2metadata.New(sess, aws.Config{
-// HTTPClient: &http.Client{Timeout: 10 * time.Second},
-// }),
-//
-// // Do not use early expiry of credentials. If a non zero value is
-// // specified the credentials will be expired early
-// ExpiryWindow: 0,
-// }
-type EC2RoleProvider struct {
- credentials.Expiry
-
- // Required EC2Metadata client to use when connecting to EC2 metadata service.
- Client *ec2metadata.EC2Metadata
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
- // with expiring credentials do not cause request to fail unexpectedly
- // due to ExpiredTokenException exceptions.
- //
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-}
-
-// NewCredentials returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
-// The ConfigProvider is satisfied by the session.Session type.
-func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
- p := &EC2RoleProvider{
- Client: ec2metadata.New(c),
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
-// metadata service.
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
- p := &EC2RoleProvider{
- Client: client,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// Retrieve retrieves credentials from the EC2 service.
-// Error will be returned if the request fails, or unable to extract
-// the desired credentials.
-func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
- credsList, err := requestCredList(m.Client)
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- if len(credsList) == 0 {
- return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
- }
- credsName := credsList[0]
-
- roleCreds, err := requestCred(m.Client, credsName)
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
-
- return credentials.Value{
- AccessKeyID: roleCreds.AccessKeyID,
- SecretAccessKey: roleCreds.SecretAccessKey,
- SessionToken: roleCreds.Token,
- ProviderName: ProviderName,
- }, nil
-}
-
-// An ec2RoleCredRespBody provides the shape for unmarshalling credential
-// request responses.
-type ec2RoleCredRespBody struct {
- // Success State
- Expiration time.Time
- AccessKeyID string
- SecretAccessKey string
- Token string
-
- // Error state
- Code string
- Message string
-}
-
-const iamSecurityCredsPath = "/iam/security-credentials"
-
-// requestCredList requests a list of credentials from the EC2 service.
-// If there are no credentials, or there is an error making or receiving
-// the request, an error is returned.
-func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
- resp, err := client.GetMetadata(iamSecurityCredsPath)
- if err != nil {
- return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
- }
-
- credsList := []string{}
- s := bufio.NewScanner(strings.NewReader(resp))
- for s.Scan() {
- credsList = append(credsList, s.Text())
- }
-
- if err := s.Err(); err != nil {
- return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
- }
-
- return credsList, nil
-}
-
-// requestCred requests the named credentials from the EC2 service.
-//
-// If the credentials cannot be found, or there is an error reading the response,
-// an error will be returned.
-func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
- resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
- if err != nil {
- return ec2RoleCredRespBody{},
- awserr.New("EC2RoleRequestError",
- fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
- err)
- }
-
- respCreds := ec2RoleCredRespBody{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
- return ec2RoleCredRespBody{},
- awserr.New("SerializationError",
- fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
- err)
- }
-
- if respCreds.Code != "Success" {
- // If an error code was returned something failed requesting the role.
- return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
- }
-
- return respCreds, nil
-}
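The removed doc comment shows how EC2RoleProvider is configured with a custom HTTP client and ExpiryWindow; a minimal runnable sketch of the same configuration, assuming the upstream SDK:

```go
package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Use a short metadata-service timeout and refresh the instance-role
	// credentials five minutes before they actually expire.
	creds := ec2rolecreds.NewCredentialsWithClient(
		ec2metadata.New(sess, &aws.Config{
			HTTPClient: &http.Client{Timeout: 5 * time.Second},
		}),
		func(p *ec2rolecreds.EC2RoleProvider) {
			p.ExpiryWindow = 5 * time.Minute
		},
	)
	_ = creds // pass to an aws.Config when building a service client
}
```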
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
deleted file mode 100644
index cccd4bf..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package ec2rolecreds_test
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/awstesting/unit"
-)
-
-const credsRespTmpl = `{
- "Code": "Success",
- "Type": "AWS-HMAC",
- "AccessKeyId" : "accessKey",
- "SecretAccessKey" : "secret",
- "Token" : "token",
- "Expiration" : "%s",
- "LastUpdated" : "2009-11-23T0:00:00Z"
-}`
-
-const credsFailRespTmpl = `{
- "Code": "ErrorCode",
- "Message": "ErrorMsg",
- "LastUpdated": "2009-11-23T0:00:00Z"
-}`
-
-func initTestServer(expireOn string, failAssume bool) *httptest.Server {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
- fmt.Fprintln(w, "RoleName")
- } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
- if failAssume {
- fmt.Fprintf(w, credsFailRespTmpl)
- } else {
- fmt.Fprintf(w, credsRespTmpl, expireOn)
- }
- } else {
- http.Error(w, "bad request", http.StatusBadRequest)
- }
- }))
-
- return server
-}
-
-func TestEC2RoleProvider(t *testing.T) {
- server := initTestServer("2014-12-16T01:51:37Z", false)
- defer server.Close()
-
- p := &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
- }
-
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error, %v", err)
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
-}
-
-func TestEC2RoleProviderFailAssume(t *testing.T) {
- server := initTestServer("2014-12-16T01:51:37Z", true)
- defer server.Close()
-
- p := &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
- }
-
- creds, err := p.Retrieve()
- assert.Error(t, err, "Expect error")
-
- e := err.(awserr.Error)
- assert.Equal(t, "ErrorCode", e.Code())
- assert.Equal(t, "ErrorMsg", e.Message())
- assert.Nil(t, e.OrigErr())
-
- assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "", creds.SessionToken, "Expect session token to match")
-}
-
-func TestEC2RoleProviderIsExpired(t *testing.T) {
- server := initTestServer("2014-12-16T01:51:37Z", false)
- defer server.Close()
-
- p := &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
- }
- p.CurrentTime = func() time.Time {
- return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
- }
-
- assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
-
- _, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error, %v", err)
-
- assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
-
- p.CurrentTime = func() time.Time {
- return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
- }
-
- assert.True(t, p.IsExpired(), "Expect creds to be expired.")
-}
-
-func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
- server := initTestServer("2014-12-16T01:51:37Z", false)
- defer server.Close()
-
- p := &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
- ExpiryWindow: time.Hour * 1,
- }
- p.CurrentTime = func() time.Time {
- return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
- }
-
- assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
-
- _, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error, %v", err)
-
- assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
-
- p.CurrentTime = func() time.Time {
- return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
- }
-
- assert.True(t, p.IsExpired(), "Expect creds to be expired.")
-}
-
-func BenchmarkEC3RoleProvider(b *testing.B) {
- server := initTestServer("2014-12-16T01:51:37Z", false)
- defer server.Close()
-
- p := &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
- }
- _, err := p.Retrieve()
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if _, err := p.Retrieve(); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
deleted file mode 100644
index a4cec5c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Package endpointcreds provides support for retrieving credentials from an
-// arbitrary HTTP endpoint.
-//
-// The credentials endpoint Provider can receive both static and refreshable
-// credentials that will expire. Credentials are static when an "Expiration"
-// value is not provided in the endpoint's response.
-//
-// Static credentials will never expire once they have been retrieved. The format
-// of the static credentials response:
-// {
-// "AccessKeyId" : "MUA...",
-// "SecretAccessKey" : "/7PC5om....",
-// }
-//
-// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
-// value in the response. The format of the refreshable credentials response:
-// {
-// "AccessKeyId" : "MUA...",
-// "SecretAccessKey" : "/7PC5om....",
-// "Token" : "AQoDY....=",
-// "Expiration" : "2016-02-25T06:03:31Z"
-// }
-//
-// Errors should be returned in the following format and only returned with 400
-// or 500 HTTP status codes.
-// {
-// "code": "ErrorCode",
-// "message": "Helpful error message."
-// }
-package endpointcreds
-
-import (
- "encoding/json"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// ProviderName is the name of the credentials provider.
-const ProviderName = `CredentialsEndpointProvider`
-
-// Provider satisfies the credentials.Provider interface, and is a client to
-// retrieve credentials from an arbitrary endpoint.
-type Provider struct {
- staticCreds bool
- credentials.Expiry
-
-	// Requires an AWS Client to make HTTP requests to the endpoint with.
-	// The Endpoint the request will be made to is provided by the aws.Config's
-	// Endpoint value.
- Client *client.Client
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
-	// with expiring credentials do not cause requests to fail unexpectedly
-	// due to ExpiredTokenException exceptions.
-	//
-	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-}
-
-// NewProviderClient returns a credentials Provider for retrieving AWS credentials
-// from arbitrary endpoint.
-func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
- p := &Provider{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: "CredentialsEndpoint",
- Endpoint: endpoint,
- },
- handlers,
- ),
- }
-
- p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
- p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
- p.Client.Handlers.Validate.Clear()
- p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
-
- for _, option := range options {
- option(p)
- }
-
- return p
-}
-
-// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
-// from an arbitrary endpoint concurrently. The client will request the
-// credentials from the endpoint when they are first needed or have expired.
-func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
- return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
-}
-
-// IsExpired returns true if the credentials retrieved are expired, or not yet
-// retrieved.
-func (p *Provider) IsExpired() bool {
- if p.staticCreds {
- return false
- }
- return p.Expiry.IsExpired()
-}
-
-// Retrieve will attempt to request the credentials from the endpoint the Provider
-// was configured for. An error will be returned if the retrieval fails.
-func (p *Provider) Retrieve() (credentials.Value, error) {
- resp, err := p.getCredentials()
- if err != nil {
- return credentials.Value{ProviderName: ProviderName},
- awserr.New("CredentialsEndpointError", "failed to load credentials", err)
- }
-
- if resp.Expiration != nil {
- p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
- } else {
- p.staticCreds = true
- }
-
- return credentials.Value{
- AccessKeyID: resp.AccessKeyID,
- SecretAccessKey: resp.SecretAccessKey,
- SessionToken: resp.Token,
- ProviderName: ProviderName,
- }, nil
-}
-
-type getCredentialsOutput struct {
- Expiration *time.Time
- AccessKeyID string
- SecretAccessKey string
- Token string
-}
-
-type errorOutput struct {
- Code string `json:"code"`
- Message string `json:"message"`
-}
-
-func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
- op := &request.Operation{
- Name: "GetCredentials",
- HTTPMethod: "GET",
- }
-
- out := &getCredentialsOutput{}
- req := p.Client.NewRequest(op, nil, out)
- req.HTTPRequest.Header.Set("Accept", "application/json")
-
- return out, req.Send()
-}
-
-func validateEndpointHandler(r *request.Request) {
- if len(r.ClientInfo.Endpoint) == 0 {
- r.Error = aws.ErrMissingEndpoint
- }
-}
-
-func unmarshalHandler(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- out := r.Data.(*getCredentialsOutput)
- if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
- r.Error = awserr.New("SerializationError",
- "failed to decode endpoint credentials",
- err,
- )
- }
-}
-
-func unmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- var errOut errorOutput
- if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
- r.Error = awserr.New("SerializationError",
- "failed to decode endpoint credentials",
- err,
- )
- }
-
- // Response body format is not consistent between metadata endpoints.
- // Grab the error message as a string and include that as the source error
- r.Error = awserr.New(errOut.Code, errOut.Message, nil)
-}
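
The endpointcreds provider removed here loads credentials from any HTTP endpoint that returns the JSON shape documented in the package comment. A hedged usage sketch; the endpoint URL and expiry window below are placeholders, not values from this repository:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	def := defaults.Get()

	// Hypothetical endpoint; anything serving the JSON shape documented
	// in the deleted package comment would work.
	creds := endpointcreds.NewCredentialsClient(
		*def.Config, def.Handlers,
		"http://169.254.170.2/v2/credentials",
		func(p *endpointcreds.Provider) {
			// Refresh five minutes before the reported Expiration.
			p.ExpiryWindow = 5 * time.Minute
		},
	)

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials served by", v.ProviderName)
}
```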
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider_test.go
deleted file mode 100644
index ad057a3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package endpointcreds_test
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/stretchr/testify/assert"
-)
-
-func TestRetrieveRefreshableCredentials(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- assert.Equal(t, "/path/to/endpoint", r.URL.Path)
- assert.Equal(t, "application/json", r.Header.Get("Accept"))
- assert.Equal(t, "else", r.URL.Query().Get("something"))
-
- encoder := json.NewEncoder(w)
- err := encoder.Encode(map[string]interface{}{
- "AccessKeyID": "AKID",
- "SecretAccessKey": "SECRET",
- "Token": "TOKEN",
- "Expiration": time.Now().Add(1 * time.Hour),
- })
-
- if err != nil {
- fmt.Println("failed to write out creds", err)
- }
- }))
-
- client := endpointcreds.NewProviderClient(*unit.Session.Config,
- unit.Session.Handlers,
- server.URL+"/path/to/endpoint?something=else",
- )
- creds, err := client.Retrieve()
-
- assert.NoError(t, err)
-
- assert.Equal(t, "AKID", creds.AccessKeyID)
- assert.Equal(t, "SECRET", creds.SecretAccessKey)
- assert.Equal(t, "TOKEN", creds.SessionToken)
- assert.False(t, client.IsExpired())
-
- client.(*endpointcreds.Provider).CurrentTime = func() time.Time {
- return time.Now().Add(2 * time.Hour)
- }
-
- assert.True(t, client.IsExpired())
-}
-
-func TestRetrieveStaticCredentials(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- encoder := json.NewEncoder(w)
- err := encoder.Encode(map[string]interface{}{
- "AccessKeyID": "AKID",
- "SecretAccessKey": "SECRET",
- })
-
- if err != nil {
- fmt.Println("failed to write out creds", err)
- }
- }))
-
- client := endpointcreds.NewProviderClient(*unit.Session.Config, unit.Session.Handlers, server.URL)
- creds, err := client.Retrieve()
-
- assert.NoError(t, err)
-
- assert.Equal(t, "AKID", creds.AccessKeyID)
- assert.Equal(t, "SECRET", creds.SecretAccessKey)
- assert.Empty(t, creds.SessionToken)
- assert.False(t, client.IsExpired())
-}
-
-func TestFailedRetrieveCredentials(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(400)
- encoder := json.NewEncoder(w)
- err := encoder.Encode(map[string]interface{}{
- "Code": "Error",
- "Message": "Message",
- })
-
- if err != nil {
- fmt.Println("failed to write error", err)
- }
- }))
-
- client := endpointcreds.NewProviderClient(*unit.Session.Config, unit.Session.Handlers, server.URL)
- creds, err := client.Retrieve()
-
- assert.Error(t, err)
- aerr := err.(awserr.Error)
-
- assert.Equal(t, "CredentialsEndpointError", aerr.Code())
- assert.Equal(t, "failed to load credentials", aerr.Message())
-
- aerr = aerr.OrigErr().(awserr.Error)
- assert.Equal(t, "Error", aerr.Code())
- assert.Equal(t, "Message", aerr.Message())
-
- assert.Empty(t, creds.AccessKeyID)
- assert.Empty(t, creds.SecretAccessKey)
- assert.Empty(t, creds.SessionToken)
- assert.True(t, client.IsExpired())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
deleted file mode 100644
index 96655bc..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package credentials
-
-import (
- "os"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// EnvProviderName provides the name of the Env provider.
-const EnvProviderName = "EnvProvider"
-
-var (
- // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
- // found in the process's environment.
- //
- // @readonly
- ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
-
- // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
- // can't be found in the process's environment.
- //
- // @readonly
- ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
-)
-
-// An EnvProvider retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
-// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
-type EnvProvider struct {
- retrieved bool
-}
-
-// NewEnvCredentials returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvCredentials() *Credentials {
- return NewCredentials(&EnvProvider{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvProvider) Retrieve() (Value, error) {
- e.retrieved = false
-
- id := os.Getenv("AWS_ACCESS_KEY_ID")
- if id == "" {
- id = os.Getenv("AWS_ACCESS_KEY")
- }
-
- secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
- if secret == "" {
- secret = os.Getenv("AWS_SECRET_KEY")
- }
-
- if id == "" {
- return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
- }
-
- if secret == "" {
- return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
- }
-
- e.retrieved = true
- return Value{
- AccessKeyID: id,
- SecretAccessKey: secret,
- SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
- ProviderName: EnvProviderName,
- }, nil
-}
-
-// IsExpired returns true if the credentials have not yet been retrieved.
-func (e *EnvProvider) IsExpired() bool {
- return !e.retrieved
-}
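
The env provider removed above is the simplest member of the credential chain: it reads key material from environment variables and never expires. A small sketch with placeholder values:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder values; the provider reads AWS_ACCESS_KEY_ID /
	// AWS_SECRET_ACCESS_KEY, or the legacy AWS_ACCESS_KEY / AWS_SECRET_KEY.
	os.Setenv("AWS_ACCESS_KEY_ID", "example-access-key")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "example-secret-key")

	p := &credentials.EnvProvider{}
	fmt.Println("expired before retrieve:", p.IsExpired()) // true

	v, err := p.Retrieve()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("retrieved via", v.ProviderName, "| expired now:", p.IsExpired())
}
```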
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
deleted file mode 100644
index 53f6ce2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package credentials
-
-import (
- "github.com/stretchr/testify/assert"
- "os"
- "testing"
-)
-
-func TestEnvProviderRetrieve(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_ACCESS_KEY_ID", "access")
- os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
- os.Setenv("AWS_SESSION_TOKEN", "token")
-
- e := EnvProvider{}
- creds, err := e.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
-}
-
-func TestEnvProviderIsExpired(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_ACCESS_KEY_ID", "access")
- os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
- os.Setenv("AWS_SESSION_TOKEN", "token")
-
- e := EnvProvider{}
-
- assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")
-
- _, err := e.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
-}
-
-func TestEnvProviderNoAccessKeyID(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
-
- e := EnvProvider{}
- creds, err := e.Retrieve()
- assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
-}
-
-func TestEnvProviderNoSecretAccessKey(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_ACCESS_KEY_ID", "access")
-
- e := EnvProvider{}
- creds, err := e.Retrieve()
- assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
-}
-
-func TestEnvProviderAlternateNames(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_ACCESS_KEY", "access")
- os.Setenv("AWS_SECRET_KEY", "secret")
-
- e := EnvProvider{}
- creds, err := e.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
- assert.Empty(t, creds.SessionToken, "Expected no token")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
deleted file mode 100644
index 7fc91d9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-[default]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-aws_session_token = token
-
-[no_token]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-
-[with_colon]
-aws_access_key_id: accessKey
-aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
deleted file mode 100644
index 7fb7cbf..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package credentials
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/go-ini/ini"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// SharedCredsProviderName provides the name of the SharedCreds provider.
-const SharedCredsProviderName = "SharedCredentialsProvider"
-
-var (
- // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
- //
- // @readonly
- ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
-)
-
-// A SharedCredentialsProvider retrieves credentials from the current user's home
-// directory, and keeps track if those credentials are expired.
-//
-// Profile ini file example: $HOME/.aws/credentials
-type SharedCredentialsProvider struct {
- // Path to the shared credentials file.
- //
- // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
- // env value is empty will default to current user's home directory.
- // Linux/OSX: "$HOME/.aws/credentials"
- // Windows: "%USERPROFILE%\.aws\credentials"
- Filename string
-
- // AWS Profile to extract credentials from the shared credentials file. If empty
- // will default to environment variable "AWS_PROFILE" or "default" if
- // environment variable is also not set.
- Profile string
-
- // retrieved states if the credentials have been successfully retrieved.
- retrieved bool
-}
-
-// NewSharedCredentials returns a pointer to a new Credentials object
-// wrapping the Profile file provider.
-func NewSharedCredentials(filename, profile string) *Credentials {
- return NewCredentials(&SharedCredentialsProvider{
- Filename: filename,
- Profile: profile,
- })
-}
-
-// Retrieve reads and extracts the shared credentials from the current
-// user's home directory.
-func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
- p.retrieved = false
-
- filename, err := p.filename()
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, err
- }
-
- creds, err := loadProfile(filename, p.profile())
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, err
- }
-
- p.retrieved = true
- return creds, nil
-}
-
-// IsExpired returns if the shared credentials have expired.
-func (p *SharedCredentialsProvider) IsExpired() bool {
- return !p.retrieved
-}
-
-// loadProfile loads the profile from the file pointed to by the shared credentials filename.
-// The credentials retrieved from the profile will be returned or error. Error will be
-// returned if it fails to read from the file, or the data is invalid.
-func loadProfile(filename, profile string) (Value, error) {
- config, err := ini.Load(filename)
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
- }
- iniProfile, err := config.GetSection(profile)
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
- }
-
- id, err := iniProfile.GetKey("aws_access_key_id")
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
- fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
- err)
- }
-
- secret, err := iniProfile.GetKey("aws_secret_access_key")
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
- fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
- nil)
- }
-
- // Default to empty string if not found
- token := iniProfile.Key("aws_session_token")
-
- return Value{
- AccessKeyID: id.String(),
- SecretAccessKey: secret.String(),
- SessionToken: token.String(),
- ProviderName: SharedCredsProviderName,
- }, nil
-}
-
-// filename returns the filename to use to read AWS shared credentials.
-//
-// Will return an error if the user's home directory path cannot be found.
-func (p *SharedCredentialsProvider) filename() (string, error) {
- if p.Filename == "" {
- if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
- return p.Filename, nil
- }
-
- homeDir := os.Getenv("HOME") // *nix
- if homeDir == "" { // Windows
- homeDir = os.Getenv("USERPROFILE")
- }
- if homeDir == "" {
- return "", ErrSharedCredentialsHomeNotFound
- }
-
- p.Filename = filepath.Join(homeDir, ".aws", "credentials")
- }
-
- return p.Filename, nil
-}
-
-// profile returns the AWS shared credentials profile. If empty will read
-// environment variable "AWS_PROFILE". If that is not set profile will
-// return "default".
-func (p *SharedCredentialsProvider) profile() string {
- if p.Profile == "" {
- p.Profile = os.Getenv("AWS_PROFILE")
- }
- if p.Profile == "" {
- p.Profile = "default"
- }
-
- return p.Profile
-}
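
The shared-credentials provider removed above reads a profile such as the ones in `example.ini` from `$HOME/.aws/credentials` (or the path in `AWS_SHARED_CREDENTIALS_FILE`). A minimal sketch of resolving the default profile, following the lookup rules documented in the deleted file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty filename and profile fall back to $HOME/.aws/credentials and
	// the "default" profile (or AWS_SHARED_CREDENTIALS_FILE / AWS_PROFILE).
	creds := credentials.NewSharedCredentials("", "")

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("profile credentials loaded via", v.ProviderName)
}
```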
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
deleted file mode 100644
index 6b4093a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package credentials
-
-import (
- "os"
- "path/filepath"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestSharedCredentialsProvider(t *testing.T) {
- os.Clearenv()
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
-}
-
-func TestSharedCredentialsProviderIsExpired(t *testing.T) {
- os.Clearenv()
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
-
- assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")
-
- _, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
-}
-
-func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini")
- p := SharedCredentialsProvider{}
- creds, err := p.Retrieve()
-
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
-}
-
-func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) {
- os.Clearenv()
- wd, err := os.Getwd()
- assert.NoError(t, err)
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini"))
- p := SharedCredentialsProvider{}
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
-}
-
-func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
- os.Clearenv()
- os.Setenv("AWS_PROFILE", "no_token")
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect no token")
-}
-
-func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
- os.Clearenv()
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect no token")
-}
-
-func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
- os.Clearenv()
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect no token")
-}
-
-func BenchmarkSharedCredentialsProvider(b *testing.B) {
- os.Clearenv()
-
- p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
- _, err := p.Retrieve()
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := p.Retrieve()
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
deleted file mode 100644
index 4f5dab3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package credentials
-
-import (
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// StaticProviderName provides the name of the Static provider.
-const StaticProviderName = "StaticProvider"
-
-var (
- // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
- //
- // @readonly
- ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
-)
-
-// A StaticProvider is a set of credentials which are set programmatically,
-// and will never expire.
-type StaticProvider struct {
- Value
-}
-
-// NewStaticCredentials returns a pointer to a new Credentials object
-// wrapping a static credentials value provider.
-func NewStaticCredentials(id, secret, token string) *Credentials {
- return NewCredentials(&StaticProvider{Value: Value{
- AccessKeyID: id,
- SecretAccessKey: secret,
- SessionToken: token,
- }})
-}
-
-// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
-// wrapping the static credentials value provided. Same as NewStaticCredentials
-// but takes the creds Value instead of individual fields.
-func NewStaticCredentialsFromCreds(creds Value) *Credentials {
- return NewCredentials(&StaticProvider{Value: creds})
-}
-
-// Retrieve returns the credentials or error if the credentials are invalid.
-func (s *StaticProvider) Retrieve() (Value, error) {
- if s.AccessKeyID == "" || s.SecretAccessKey == "" {
- return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
- }
-
- if len(s.Value.ProviderName) == 0 {
- s.Value.ProviderName = StaticProviderName
- }
- return s.Value, nil
-}
-
-// IsExpired returns if the credentials are expired.
-//
-// For StaticProvider, the credentials never expire.
-func (s *StaticProvider) IsExpired() bool {
- return false
-}
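
The static provider removed above wraps a fixed key pair that never expires, which is handy for local testing. A sketch wiring it into an SQS client; the key material, region, and ListQueues call are placeholders, not code from this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// Placeholder key material; static credentials never expire.
	creds := credentials.NewStaticCredentials("EXAMPLEKEY", "EXAMPLESECRET", "")

	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: creds,
	}))

	// Any service client built from this session signs with the fixed pair.
	svc := sqs.New(sess)
	out, err := svc.ListQueues(&sqs.ListQueuesInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("queues:", out.QueueUrls)
}
```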
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
deleted file mode 100644
index ea01236..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package credentials
-
-import (
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestStaticProviderGet(t *testing.T) {
- s := StaticProvider{
- Value: Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "",
- },
- }
-
- creds, err := s.Retrieve()
- assert.Nil(t, err, "Expect no error")
- assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
- assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Empty(t, creds.SessionToken, "Expect no session token")
-}
-
-func TestStaticProviderIsExpired(t *testing.T) {
- s := StaticProvider{
- Value: Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "",
- },
- }
-
- assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
deleted file mode 100644
index 30c847a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Package stscreds provides credential Providers to retrieve STS AWS credentials.
-//
-// STS provides multiple ways to retrieve credentials which can be used when making
-// future AWS service API operation calls.
-package stscreds
-
-import (
- "fmt"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/service/sts"
-)
-
-// ProviderName provides the name of the AssumeRole provider.
-const ProviderName = "AssumeRoleProvider"
-
-// AssumeRoler represents the minimal subset of the STS client API used by this provider.
-type AssumeRoler interface {
- AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
-}
-
-// DefaultDuration is the default amount of time in minutes that the credentials
-// will be valid for.
-var DefaultDuration = time.Duration(15) * time.Minute
-
-// AssumeRoleProvider retrieves temporary credentials from the STS service, and
-// keeps track of their expiration time. This provider must be used explicitly,
-// as it is not included in the credentials chain.
-type AssumeRoleProvider struct {
- credentials.Expiry
-
- // STS client to make assume role request with.
- Client AssumeRoler
-
- // Role to be assumed.
- RoleARN string
-
- // Session name, if you wish to reuse the credentials elsewhere.
- RoleSessionName string
-
- // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
- Duration time.Duration
-
- // Optional ExternalID to pass along, defaults to nil if not set.
- ExternalID *string
-
- // The policy plain text must be 2048 bytes or shorter. However, an internal
- // conversion compresses it into a packed binary format with a separate limit.
- // The PackedPolicySize response element indicates by percentage how close to
- // the upper size limit the policy is, with 100% equaling the maximum allowed
- // size.
- Policy *string
-
- // The identification number of the MFA device that is associated with the user
- // who is making the AssumeRole call. Specify this value if the trust policy
- // of the role being assumed includes a condition that requires MFA authentication.
- // The value is either the serial number for a hardware device (such as GAHT12345678)
- // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
- SerialNumber *string
-
- // The value provided by the MFA device, if the trust policy of the role being
- // assumed requires MFA (that is, if the policy includes a condition that tests
- // for MFA). If the role being assumed requires MFA and if the TokenCode value
- // is missing or expired, the AssumeRole call returns an "access denied" error.
- TokenCode *string
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
-	// with expiring credentials do not cause requests to fail unexpectedly
-	// due to ExpiredTokenException exceptions.
-	//
-	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-}
-
-// NewCredentials returns a pointer to a new Credentials object wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
-//
-// Takes a Config provider to create the STS client. The ConfigProvider is
-// satisfied by the session.Session type.
-func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
- p := &AssumeRoleProvider{
- Client: sts.New(c),
- RoleARN: roleARN,
- Duration: DefaultDuration,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation.
-//
-// Takes an AssumeRoler which can be satisfied by the STS client.
-func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
- p := &AssumeRoleProvider{
- Client: svc,
- RoleARN: roleARN,
- Duration: DefaultDuration,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// Retrieve generates a new set of temporary credentials using STS.
-func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
- // Apply defaults where parameters are not set.
- if p.RoleSessionName == "" {
- // Try to work out a role name that will hopefully end up unique.
- p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
- }
- if p.Duration == 0 {
- // Expire as often as AWS permits.
- p.Duration = DefaultDuration
- }
- input := &sts.AssumeRoleInput{
- DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
- RoleArn: aws.String(p.RoleARN),
- RoleSessionName: aws.String(p.RoleSessionName),
- ExternalId: p.ExternalID,
- }
- if p.Policy != nil {
- input.Policy = p.Policy
- }
- if p.SerialNumber != nil && p.TokenCode != nil {
- input.SerialNumber = p.SerialNumber
- input.TokenCode = p.TokenCode
- }
- roleOutput, err := p.Client.AssumeRole(input)
-
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- // We will proactively generate new credentials before they expire.
- p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
-
- return credentials.Value{
- AccessKeyID: *roleOutput.Credentials.AccessKeyId,
- SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
- SessionToken: *roleOutput.Credentials.SessionToken,
- ProviderName: ProviderName,
- }, nil
-}
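
The stscreds provider removed above calls sts:AssumeRole and refreshes the temporary credentials before they expire. A sketch assuming a hypothetical role ARN; the option function mirrors the `options ...func(*AssumeRoleProvider)` parameter shown above:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Hypothetical role ARN; the variadic options tweak the provider
	// before it is wrapped in a Credentials object.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
			p.ExpiryWindow = 1 * time.Minute
		})

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed-role credentials via", v.ProviderName)
}
```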
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
deleted file mode 100644
index 6bd6e91..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package stscreds
-
-import (
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/stretchr/testify/assert"
-)
-
-type stubSTS struct {
-}
-
-func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
- expiry := time.Now().Add(60 * time.Minute)
- return &sts.AssumeRoleOutput{
- Credentials: &sts.Credentials{
- // Just reflect the role arn to the provider.
- AccessKeyId: input.RoleArn,
- SecretAccessKey: aws.String("assumedSecretAccessKey"),
- SessionToken: aws.String("assumedSessionToken"),
- Expiration: &expiry,
- },
- }, nil
-}
-
-func TestAssumeRoleProvider(t *testing.T) {
- stub := &stubSTS{}
- p := &AssumeRoleProvider{
- Client: stub,
- RoleARN: "roleARN",
- }
-
- creds, err := p.Retrieve()
- assert.Nil(t, err, "Expect no error")
-
- assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN")
- assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
- assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
-}
-
-func BenchmarkAssumeRoleProvider(b *testing.B) {
- stub := &stubSTS{}
- p := &AssumeRoleProvider{
- Client: stub,
- RoleARN: "roleARN",
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if _, err := p.Retrieve(); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
deleted file mode 100644
index 8dbbf67..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Package defaults is a collection of helpers to retrieve the SDK's default
-// configuration and handlers.
-//
-// Generally this package shouldn't be used directly; use session.Session
-// instead. This package is useful when you need to reset the defaults
-// of a session or service client to the SDK defaults before setting
-// additional parameters.
-package defaults
-
-import (
- "fmt"
- "net/http"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/endpoints"
-)
-
-// A Defaults provides a collection of default values for SDK clients.
-type Defaults struct {
- Config *aws.Config
- Handlers request.Handlers
-}
-
-// Get returns the SDK's default values with Config and handlers pre-configured.
-func Get() Defaults {
- cfg := Config()
- handlers := Handlers()
- cfg.Credentials = CredChain(cfg, handlers)
-
- return Defaults{
- Config: cfg,
- Handlers: handlers,
- }
-}
-
-// Config returns the default configuration without credentials.
-// To retrieve a config with credentials also included use
-// `defaults.Get().Config` instead.
-//
-// Generally you shouldn't need to use this method directly, but it
-// is available if you need to reset the configuration of an
-// existing service client or session.
-func Config() *aws.Config {
- return aws.NewConfig().
- WithCredentials(credentials.AnonymousCredentials).
- WithRegion(os.Getenv("AWS_REGION")).
- WithHTTPClient(http.DefaultClient).
- WithMaxRetries(aws.UseServiceDefaultRetries).
- WithLogger(aws.NewDefaultLogger()).
- WithLogLevel(aws.LogOff).
- WithSleepDelay(time.Sleep)
-}
-
-// Handlers returns the default request handlers.
-//
-// Generally you shouldn't need to use this method directly, but it
-// is available if you need to reset the request handlers of an
-// existing service client or session.
-func Handlers() request.Handlers {
- var handlers request.Handlers
-
- handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
- handlers.Validate.AfterEachFn = request.HandlerListStopOnError
- handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
- handlers.Build.AfterEachFn = request.HandlerListStopOnError
- handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
- handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
- handlers.Send.PushBackNamed(corehandlers.SendHandler)
- handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
- handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
-
- return handlers
-}
-
-// CredChain returns the default credential chain.
-//
-// Generally you shouldn't need to use this method directly, but it
-// is available if you need to reset the credentials of an
-// existing service client or session's Config.
-func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
- return credentials.NewCredentials(&credentials.ChainProvider{
- VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
- Providers: []credentials.Provider{
- &credentials.EnvProvider{},
- &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
- RemoteCredProvider(*cfg, handlers),
- },
- })
-}
-
-// RemoteCredProvider returns a credentials provider for the default remote
-// endpoints such as EC2 or ECS Roles.
-func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
-
- if len(ecsCredURI) > 0 {
- return ecsCredProvider(cfg, handlers, ecsCredURI)
- }
-
- return ec2RoleProvider(cfg, handlers)
-}
-
-func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
- const host = `169.254.170.2`
-
- return endpointcreds.NewProviderClient(cfg, handlers,
- fmt.Sprintf("http://%s%s", host, uri),
- func(p *endpointcreds.Provider) {
- p.ExpiryWindow = 5 * time.Minute
- },
- )
-}
-
-func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
- aws.StringValue(cfg.Region), true, false)
-
- return &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
- ExpiryWindow: 5 * time.Minute,
- }
-}
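
The defaults package removed above assembles the standard credential chain (environment variables, shared credentials file, then the ECS/EC2 role endpoint). A short sketch of resolving credentials through the default config; which provider wins depends entirely on the runtime environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Get() returns the default Config and Handlers with the credential
	// chain attached: env vars, shared credentials file, then the remote
	// ECS/EC2 role provider.
	def := defaults.Get()

	v, err := def.Config.Credentials.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default chain picked", v.ProviderName)
}
```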
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults_test.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults_test.go
deleted file mode 100644
index 6822674..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package defaults
-
-import (
- "fmt"
- "os"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/stretchr/testify/assert"
-)
-
-func TestECSCredProvider(t *testing.T) {
- defer os.Clearenv()
- os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/abc/123")
-
- provider := RemoteCredProvider(aws.Config{}, request.Handlers{})
-
- assert.NotNil(t, provider)
-
- ecsProvider, ok := provider.(*endpointcreds.Provider)
- assert.NotNil(t, ecsProvider)
- assert.True(t, ok)
-
- assert.Equal(t, fmt.Sprintf("http://169.254.170.2/abc/123"),
- ecsProvider.Client.Endpoint)
-}
-
-func TestDefaultEC2RoleProvider(t *testing.T) {
- provider := RemoteCredProvider(aws.Config{}, request.Handlers{})
-
- assert.NotNil(t, provider)
-
- ec2Provider, ok := provider.(*ec2rolecreds.EC2RoleProvider)
- assert.NotNil(t, ec2Provider)
- assert.True(t, ok)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
deleted file mode 100644
index e5755d1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package ec2metadata
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "path"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// GetMetadata uses the path provided to request information from the EC2
-// instance metadata service. The content will be returned as a string, or
-// error if the request failed.
-func (c *EC2Metadata) GetMetadata(p string) (string, error) {
- op := &request.Operation{
- Name: "GetMetadata",
- HTTPMethod: "GET",
- HTTPPath: path.Join("/", "meta-data", p),
- }
-
- output := &metadataOutput{}
- req := c.NewRequest(op, nil, output)
-
- return output.Content, req.Send()
-}
-
-// GetUserData returns the user data that was configured for the EC2 instance.
-// If there is no user data set up for the EC2 instance, a "NotFoundError" error
-// code will be returned.
-func (c *EC2Metadata) GetUserData() (string, error) {
- op := &request.Operation{
- Name: "GetUserData",
- HTTPMethod: "GET",
- HTTPPath: path.Join("/", "user-data"),
- }
-
- output := &metadataOutput{}
- req := c.NewRequest(op, nil, output)
- req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
- if r.HTTPResponse.StatusCode == http.StatusNotFound {
- r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
- }
- })
-
- return output.Content, req.Send()
-}
-
-// GetDynamicData uses the path provided to request information from the EC2
-// instance metadata service for dynamic data. The content will be returned
-// as a string, or error if the request failed.
-func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
- op := &request.Operation{
- Name: "GetDynamicData",
- HTTPMethod: "GET",
- HTTPPath: path.Join("/", "dynamic", p),
- }
-
- output := &metadataOutput{}
- req := c.NewRequest(op, nil, output)
-
- return output.Content, req.Send()
-}
-
-// GetInstanceIdentityDocument retrieves an identity document describing an
-// instance. An error is returned if the request fails or the response
-// cannot be parsed.
-func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
- resp, err := c.GetDynamicData("instance-identity/document")
- if err != nil {
- return EC2InstanceIdentityDocument{},
- awserr.New("EC2MetadataRequestError",
- "failed to get EC2 instance identity document", err)
- }
-
- doc := EC2InstanceIdentityDocument{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
- return EC2InstanceIdentityDocument{},
- awserr.New("SerializationError",
- "failed to decode EC2 instance identity document", err)
- }
-
- return doc, nil
-}
-
-// IAMInfo retrieves IAM info from the metadata API
-func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
- resp, err := c.GetMetadata("iam/info")
- if err != nil {
- return EC2IAMInfo{},
- awserr.New("EC2MetadataRequestError",
- "failed to get EC2 IAM info", err)
- }
-
- info := EC2IAMInfo{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
- return EC2IAMInfo{},
- awserr.New("SerializationError",
- "failed to decode EC2 IAM info", err)
- }
-
- if info.Code != "Success" {
- errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
- return EC2IAMInfo{},
- awserr.New("EC2MetadataError", errMsg, nil)
- }
-
- return info, nil
-}
-
-// Region returns the region the instance is running in.
-func (c *EC2Metadata) Region() (string, error) {
- resp, err := c.GetMetadata("placement/availability-zone")
- if err != nil {
- return "", err
- }
-
- // returns region without the suffix. Eg: us-west-2a becomes us-west-2
- return resp[:len(resp)-1], nil
-}
-
-// Available returns if the application has access to the EC2 Metadata service.
-// Can be used to determine if the application is running within an EC2 instance and
-// the metadata service is available.
-func (c *EC2Metadata) Available() bool {
- if _, err := c.GetMetadata("instance-id"); err != nil {
- return false
- }
-
- return true
-}
-
-// An EC2IAMInfo provides the shape for unmarshalling
-// IAM info from the metadata API.
-type EC2IAMInfo struct {
- Code string
- LastUpdated time.Time
- InstanceProfileArn string
- InstanceProfileID string
-}
-
-// An EC2InstanceIdentityDocument provides the shape for unmarshalling
-// an instance identity document
-type EC2InstanceIdentityDocument struct {
- DevpayProductCodes []string `json:"devpayProductCodes"`
- AvailabilityZone string `json:"availabilityZone"`
- PrivateIP string `json:"privateIp"`
- Version string `json:"version"`
- Region string `json:"region"`
- InstanceID string `json:"instanceId"`
- BillingProducts []string `json:"billingProducts"`
- InstanceType string `json:"instanceType"`
- AccountID string `json:"accountId"`
- PendingTime time.Time `json:"pendingTime"`
- ImageID string `json:"imageId"`
- KernelID string `json:"kernelId"`
- RamdiskID string `json:"ramdiskId"`
- Architecture string `json:"architecture"`
-}
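
The ec2metadata API file removed above exposes simple string lookups against the instance metadata service. A sketch of the common calls; these only succeed when run on an EC2 instance:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Available probes the metadata endpoint; everything below only
	// succeeds when running on an EC2 instance.
	if !svc.Available() {
		log.Fatal("EC2 metadata service not reachable")
	}

	region, err := svc.Region()
	if err != nil {
		log.Fatal(err)
	}
	instanceID, _ := svc.GetMetadata("instance-id")
	fmt.Println("region:", region, "instance:", instanceID)
}
```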
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
deleted file mode 100644
index 35e7578..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package ec2metadata_test
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "path"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/unit"
-)
-
-const instanceIdentityDocument = `{
- "devpayProductCodes" : null,
- "availabilityZone" : "us-east-1d",
- "privateIp" : "10.158.112.84",
- "version" : "2010-08-31",
- "region" : "us-east-1",
- "instanceId" : "i-1234567890abcdef0",
- "billingProducts" : null,
- "instanceType" : "t1.micro",
- "accountId" : "123456789012",
- "pendingTime" : "2015-11-19T16:32:11Z",
- "imageId" : "ami-5fb8c835",
- "kernelId" : "aki-919dcaf8",
- "ramdiskId" : null,
- "architecture" : "x86_64"
-}`
-
-const validIamInfo = `{
- "Code" : "Success",
- "LastUpdated" : "2016-03-17T12:27:32Z",
- "InstanceProfileArn" : "arn:aws:iam::123456789012:instance-profile/my-instance-profile",
- "InstanceProfileId" : "AIPAABCDEFGHIJKLMN123"
-}`
-
-const unsuccessfulIamInfo = `{
- "Code" : "Failed",
- "LastUpdated" : "2016-03-17T12:27:32Z",
- "InstanceProfileArn" : "arn:aws:iam::123456789012:instance-profile/my-instance-profile",
- "InstanceProfileId" : "AIPAABCDEFGHIJKLMN123"
-}`
-
-func initTestServer(path string, resp string) *httptest.Server {
- return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.RequestURI != path {
- http.Error(w, "not found", http.StatusNotFound)
- return
- }
-
- w.Write([]byte(resp))
- }))
-}
-
-func TestEndpoint(t *testing.T) {
- c := ec2metadata.New(unit.Session)
- op := &request.Operation{
- Name: "GetMetadata",
- HTTPMethod: "GET",
- HTTPPath: path.Join("/", "meta-data", "testpath"),
- }
-
- req := c.NewRequest(op, nil, nil)
- assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint)
- assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String())
-}
-
-func TestGetMetadata(t *testing.T) {
- server := initTestServer(
- "/latest/meta-data/some/path",
- "success", // real response includes suffix
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- resp, err := c.GetMetadata("some/path")
-
- assert.NoError(t, err)
- assert.Equal(t, "success", resp)
-}
-
-func TestGetUserData(t *testing.T) {
- server := initTestServer(
- "/latest/user-data",
- "success", // real response includes suffix
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- resp, err := c.GetUserData()
-
- assert.NoError(t, err)
- assert.Equal(t, "success", resp)
-}
-
-func TestGetUserData_Error(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		reader := strings.NewReader(`<html>
- <head>
-  <title>404 - Not Found</title>
- </head>
- <body>
-  <h1>404 - Not Found</h1>
- </body>
-</html>`)
- w.Header().Set("Content-Type", "text/html")
- w.Header().Set("Content-Length", fmt.Sprintf("%d", reader.Len()))
- w.WriteHeader(http.StatusNotFound)
- io.Copy(w, reader)
- }))
-
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- resp, err := c.GetUserData()
- assert.Error(t, err)
- assert.Empty(t, resp)
-
- aerr, ok := err.(awserr.Error)
- assert.True(t, ok)
- assert.Equal(t, "NotFoundError", aerr.Code())
-}
-
-func TestGetRegion(t *testing.T) {
- server := initTestServer(
- "/latest/meta-data/placement/availability-zone",
- "us-west-2a", // real response includes suffix
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- region, err := c.Region()
-
- assert.NoError(t, err)
- assert.Equal(t, "us-west-2", region)
-}
-
-func TestMetadataAvailable(t *testing.T) {
- server := initTestServer(
- "/latest/meta-data/instance-id",
- "instance-id",
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- available := c.Available()
-
- assert.True(t, available)
-}
-
-func TestMetadataIAMInfo_success(t *testing.T) {
- server := initTestServer(
- "/latest/meta-data/iam/info",
- validIamInfo,
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- iamInfo, err := c.IAMInfo()
- assert.NoError(t, err)
- assert.Equal(t, "Success", iamInfo.Code)
- assert.Equal(t, "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn)
- assert.Equal(t, "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID)
-}
-
-func TestMetadataIAMInfo_failure(t *testing.T) {
- server := initTestServer(
- "/latest/meta-data/iam/info",
- unsuccessfulIamInfo,
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- iamInfo, err := c.IAMInfo()
- assert.NotNil(t, err)
- assert.Equal(t, "", iamInfo.Code)
- assert.Equal(t, "", iamInfo.InstanceProfileArn)
- assert.Equal(t, "", iamInfo.InstanceProfileID)
-}
-
-func TestMetadataNotAvailable(t *testing.T) {
- c := ec2metadata.New(unit.Session)
- c.Handlers.Send.Clear()
- c.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &http.Response{
- StatusCode: int(0),
- Status: http.StatusText(int(0)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- r.Error = awserr.New("RequestError", "send request failed", nil)
- r.Retryable = aws.Bool(true) // network errors are retryable
- })
-
- available := c.Available()
-
- assert.False(t, available)
-}
-
-func TestMetadataErrorResponse(t *testing.T) {
- c := ec2metadata.New(unit.Session)
- c.Handlers.Send.Clear()
- c.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &http.Response{
- StatusCode: http.StatusBadRequest,
- Status: http.StatusText(http.StatusBadRequest),
- Body: ioutil.NopCloser(strings.NewReader("error message text")),
- }
-		r.Retryable = aws.Bool(false) // this request failure is not retryable
- })
-
- data, err := c.GetMetadata("uri/path")
- assert.Empty(t, data)
- assert.Contains(t, err.Error(), "error message text")
-}
-
-func TestEC2RoleProviderInstanceIdentity(t *testing.T) {
- server := initTestServer(
- "/latest/dynamic/instance-identity/document",
- instanceIdentityDocument,
- )
- defer server.Close()
- c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
-
- doc, err := c.GetInstanceIdentityDocument()
- assert.Nil(t, err, "Expect no error, %v", err)
- assert.Equal(t, doc.AccountID, "123456789012")
- assert.Equal(t, doc.AvailabilityZone, "us-east-1d")
- assert.Equal(t, doc.Region, "us-east-1")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
deleted file mode 100644
index 5b4379d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Package ec2metadata provides the client for making API calls to the
-// EC2 Metadata service.
-package ec2metadata
-
-import (
- "bytes"
- "errors"
- "io"
- "net/http"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// ServiceName is the name of the service.
-const ServiceName = "ec2metadata"
-
-// An EC2Metadata is an EC2 Metadata service Client.
-type EC2Metadata struct {
- *client.Client
-}
-
-// New creates a new instance of the EC2Metadata client with a session.
-// This client is safe to use across multiple goroutines.
-//
-//
-// Example:
-// // Create a EC2Metadata client from just a session.
-// svc := ec2metadata.New(mySession)
-//
-// // Create a EC2Metadata client with additional configuration
-// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
- c := p.ClientConfig(ServiceName, cfgs...)
- return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
-}
-
-// NewClient returns a new EC2Metadata client. Should be used to create
-// a client when not using a session. Generally using just New with a session
-// is preferred.
-//
-// If an unmodified HTTP client is provided from the stdlib default, or no client
-// is provided, the EC2Metadata client's HTTP timeout will be shortened.
-// To disable this override, set Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled by default.
-func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
- if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
- // If the http client is unmodified and this feature is not disabled
- // set custom timeouts for EC2Metadata requests.
- cfg.HTTPClient = &http.Client{
- // use a shorter timeout than default because the metadata
- // service is local if it is running, and to fail faster
- // if not running on an ec2 instance.
- Timeout: 5 * time.Second,
- }
- }
-
- svc := &EC2Metadata{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- Endpoint: endpoint,
- APIVersion: "latest",
- },
- handlers,
- ),
- }
-
- svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
- svc.Handlers.UnmarshalError.PushBack(unmarshalError)
- svc.Handlers.Validate.Clear()
- svc.Handlers.Validate.PushBack(validateEndpointHandler)
-
- // Add additional options to the service config
- for _, option := range opts {
- option(svc.Client)
- }
-
- return svc
-}
-
-func httpClientZero(c *http.Client) bool {
- return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
-}
-
-type metadataOutput struct {
- Content string
-}
-
-func unmarshalHandler(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- b := &bytes.Buffer{}
- if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
- return
- }
-
- if data, ok := r.Data.(*metadataOutput); ok {
- data.Content = b.String()
- }
-}
-
-func unmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- b := &bytes.Buffer{}
- if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
- return
- }
-
- // Response body format is not consistent between metadata endpoints.
- // Grab the error message as a string and include that as the source error
- r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
-}
-
-func validateEndpointHandler(r *request.Request) {
- if r.ClientInfo.Endpoint == "" {
- r.Error = aws.ErrMissingEndpoint
- }
-}
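For reference, a minimal sketch of how the ec2metadata client removed above is typically constructed from a session and queried; the session setup and the instance-id path are illustrative assumptions, not code from this repository.
```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// New wires the client config, handlers, and the shortened default
	// timeout described in the package docs above.
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Available reports whether the instance metadata endpoint is reachable.
	if !svc.Available() {
		log.Fatal("EC2 metadata service unreachable (not running on EC2?)")
	}

	region, err := svc.Region()
	if err != nil {
		log.Fatal(err)
	}
	instanceID, err := svc.GetMetadata("instance-id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region, instanceID)
}
```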
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go
deleted file mode 100644
index c2bc215..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ec2metadata_test
-
-import (
- "net/http"
- "net/http/httptest"
- "sync"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/stretchr/testify/assert"
-)
-
-func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) {
- svc := ec2metadata.New(unit.Session)
-
- assert.NotEqual(t, http.DefaultClient, svc.Config.HTTPClient)
- assert.Equal(t, 5*time.Second, svc.Config.HTTPClient.Timeout)
-}
-
-func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) {
- http.DefaultClient.Transport = &http.Transport{}
- defer func() {
- http.DefaultClient.Transport = nil
- }()
-
- svc := ec2metadata.New(unit.Session)
-
- assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
-
- tr, ok := svc.Config.HTTPClient.Transport.(*http.Transport)
- assert.True(t, ok)
- assert.NotNil(t, tr)
- assert.Nil(t, tr.Dial)
-}
-
-func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) {
- svc := ec2metadata.New(unit.Session, aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
-
- assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
-}
-
-func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("us-east-1a"))
- }))
-
- cfg := aws.NewConfig().WithEndpoint(server.URL)
- runEC2MetadataClients(t, cfg, 100)
-}
-
-func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("us-east-1a"))
- }))
-
- cfg := aws.NewConfig().WithEndpoint(server.URL).WithHTTPClient(&http.Client{
- Transport: http.DefaultTransport,
- })
-
- runEC2MetadataClients(t, cfg, 100)
-}
-
-func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) {
- var wg sync.WaitGroup
- wg.Add(atOnce)
- for i := 0; i < atOnce; i++ {
- go func() {
- svc := ec2metadata.New(unit.Session, cfg)
- _, err := svc.Region()
- assert.NoError(t, err)
- wg.Done()
- }()
- }
- wg.Wait()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
deleted file mode 100644
index 5766361..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package aws
-
-import "github.com/aws/aws-sdk-go/aws/awserr"
-
-var (
- // ErrMissingRegion is an error that is returned if region configuration is
- // not found.
- //
- // @readonly
- ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
-
- // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
- // resolved for a service.
- //
- // @readonly
- ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
-)
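These sentinel errors implement the awserr.Error interface, so callers usually branch on the error code rather than comparing error values. A hedged sketch follows; the explainConfigError helper is hypothetical and not part of the SDK.
```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
)

// explainConfigError is a hypothetical helper showing how the MissingRegion
// and MissingEndpoint codes are typically inspected.
func explainConfigError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		fmt.Println("non-AWS error:", err)
		return
	}
	switch aerr.Code() {
	case "MissingRegion":
		fmt.Println("no AWS region configured; set --aws-region")
	case "MissingEndpoint":
		fmt.Println("no endpoint could be resolved for this service")
	default:
		fmt.Println("AWS error:", aerr.Code(), aerr.Message())
	}
}

func main() {
	explainConfigError(aws.ErrMissingRegion)
}
```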
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
deleted file mode 100644
index db87188..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package aws
-
-import (
- "log"
- "os"
-)
-
-// A LogLevelType defines the level logging should be performed at. Used to instruct
-// the SDK which statements should be logged.
-type LogLevelType uint
-
-// LogLevel returns the pointer to a LogLevel. Should be used to work around
-// not being able to take the address of a non-composite literal.
-func LogLevel(l LogLevelType) *LogLevelType {
- return &l
-}
-
-// Value returns the LogLevel value or the default value LogOff if the LogLevel
-// is nil. Safe to use on nil value LogLevelTypes.
-func (l *LogLevelType) Value() LogLevelType {
- if l != nil {
- return *l
- }
- return LogOff
-}
-
-// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
-// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
-// LogLevel is nil, it will default to LogOff comparison.
-func (l *LogLevelType) Matches(v LogLevelType) bool {
- c := l.Value()
- return c&v == v
-}
-
-// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, it will default
-// to LogOff comparison.
-func (l *LogLevelType) AtLeast(v LogLevelType) bool {
- c := l.Value()
- return c >= v
-}
-
-const (
- // LogOff states that no logging should be performed by the SDK. This is the
-	// default state of the SDK, and should be used to disable all logging.
- LogOff LogLevelType = iota * 0x1000
-
-	// LogDebug states that debug output should be logged by the SDK. This should
-	// be used to inspect requests made and responses received.
- LogDebug
-)
-
-// Debug Logging Sub Levels
-const (
- // LogDebugWithSigning states that the SDK should log request signing and
- // presigning events. This should be used to log the signing details of
- // requests for debugging. Will also enable LogDebug.
- LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
-
- // LogDebugWithHTTPBody states the SDK should log HTTP request and response
-	// HTTP bodies in addition to the headers and path. This should be used to
-	// see the body content of requests and responses made while using the SDK.
- // Will also enable LogDebug.
- LogDebugWithHTTPBody
-
- // LogDebugWithRequestRetries states the SDK should log when service requests will
-	// be retried. This should be used when you want to log when service
-	// requests are being retried. Will also enable LogDebug.
- LogDebugWithRequestRetries
-
- // LogDebugWithRequestErrors states the SDK should log when service requests fail
- // to build, send, validate, or unmarshal.
- LogDebugWithRequestErrors
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to. Should
-// be used to provide custom logging writers for the SDK to use.
-type Logger interface {
- Log(...interface{})
-}
-
-// A LoggerFunc is a convenience type to convert a function taking a variadic
-// list of arguments and wrap it so the Logger interface can be used.
-//
-// Example:
-// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
-// fmt.Fprintln(os.Stdout, args...)
-// })})
-type LoggerFunc func(...interface{})
-
-// Log calls the wrapped function with the arguments provided
-func (f LoggerFunc) Log(args ...interface{}) {
- f(args...)
-}
-
-// NewDefaultLogger returns a Logger which will write log messages to stdout, and
-// use the same formatting runes as the stdlib log.Logger.
-func NewDefaultLogger() Logger {
- return &defaultLogger{
- logger: log.New(os.Stdout, "", log.LstdFlags),
- }
-}
-
-// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
-type defaultLogger struct {
- logger *log.Logger
-}
-
-// Log logs the parameters to the stdlib logger. See log.Println.
-func (l defaultLogger) Log(args ...interface{}) {
- l.logger.Println(args...)
-}
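A short sketch of how the Logger interface and debug log levels above are typically wired into a client config; the stderr prefix and the chosen sub level are illustrative assumptions.
```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// LoggerFunc adapts a plain variadic function to the aws.Logger interface.
	logger := aws.LoggerFunc(func(args ...interface{}) {
		log.New(os.Stderr, "aws-sdk ", log.LstdFlags).Println(args...)
	})

	// Debug sub levels imply LogDebug, so Matches(aws.LogDebug) is true here.
	cfg := aws.NewConfig().
		WithLogger(logger).
		WithLogLevel(aws.LogDebugWithRequestRetries)

	if cfg.LogLevel.Matches(aws.LogDebug) {
		logger.Log("debug logging enabled, retries will be logged")
	}
}
```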
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
deleted file mode 100644
index 5279c19..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package request
-
-import (
- "fmt"
- "strings"
-)
-
-// A Handlers provides a collection of request handlers for various
-// stages of handling requests.
-type Handlers struct {
- Validate HandlerList
- Build HandlerList
- Sign HandlerList
- Send HandlerList
- ValidateResponse HandlerList
- Unmarshal HandlerList
- UnmarshalMeta HandlerList
- UnmarshalError HandlerList
- Retry HandlerList
- AfterRetry HandlerList
-}
-
-// Copy returns a copy of this handler's lists.
-func (h *Handlers) Copy() Handlers {
- return Handlers{
- Validate: h.Validate.copy(),
- Build: h.Build.copy(),
- Sign: h.Sign.copy(),
- Send: h.Send.copy(),
- ValidateResponse: h.ValidateResponse.copy(),
- Unmarshal: h.Unmarshal.copy(),
- UnmarshalError: h.UnmarshalError.copy(),
- UnmarshalMeta: h.UnmarshalMeta.copy(),
- Retry: h.Retry.copy(),
- AfterRetry: h.AfterRetry.copy(),
- }
-}
-
-// Clear removes callback functions for all handlers
-func (h *Handlers) Clear() {
- h.Validate.Clear()
- h.Build.Clear()
- h.Send.Clear()
- h.Sign.Clear()
- h.Unmarshal.Clear()
- h.UnmarshalMeta.Clear()
- h.UnmarshalError.Clear()
- h.ValidateResponse.Clear()
- h.Retry.Clear()
- h.AfterRetry.Clear()
-}
-
-// A HandlerListRunItem represents an entry in the HandlerList which
-// is being run.
-type HandlerListRunItem struct {
- Index int
- Handler NamedHandler
- Request *Request
-}
-
-// A HandlerList manages zero or more handlers in a list.
-type HandlerList struct {
- list []NamedHandler
-
- // Called after each request handler in the list is called. If set
- // and the func returns true the HandlerList will continue to iterate
- // over the request handlers. If false is returned the HandlerList
- // will stop iterating.
- //
-	// Should be used if extra logic is to be performed between each handler
-	// in the list. This can be used to terminate a list's iteration
-	// based on a condition, such as an error, like HandlerListStopOnError,
-	// or for logging, like HandlerListLogItem.
- AfterEachFn func(item HandlerListRunItem) bool
-}
-
-// A NamedHandler is a struct that contains a name and function callback.
-type NamedHandler struct {
- Name string
- Fn func(*Request)
-}
-
-// copy creates a copy of the handler list.
-func (l *HandlerList) copy() HandlerList {
- n := HandlerList{
- AfterEachFn: l.AfterEachFn,
- }
- n.list = append([]NamedHandler{}, l.list...)
- return n
-}
-
-// Clear clears the handler list.
-func (l *HandlerList) Clear() {
- l.list = []NamedHandler{}
-}
-
-// Len returns the number of handlers in the list.
-func (l *HandlerList) Len() int {
- return len(l.list)
-}
-
-// PushBack pushes handler f to the back of the handler list.
-func (l *HandlerList) PushBack(f func(*Request)) {
- l.list = append(l.list, NamedHandler{"__anonymous", f})
-}
-
-// PushFront pushes handler f to the front of the handler list.
-func (l *HandlerList) PushFront(f func(*Request)) {
- l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
-}
-
-// PushBackNamed pushes the named handler n to the back of the handler list.
-func (l *HandlerList) PushBackNamed(n NamedHandler) {
- l.list = append(l.list, n)
-}
-
-// PushFrontNamed pushes the named handler n to the front of the handler list.
-func (l *HandlerList) PushFrontNamed(n NamedHandler) {
- l.list = append([]NamedHandler{n}, l.list...)
-}
-
-// Remove removes a NamedHandler n
-func (l *HandlerList) Remove(n NamedHandler) {
- newlist := []NamedHandler{}
- for _, m := range l.list {
- if m.Name != n.Name {
- newlist = append(newlist, m)
- }
- }
- l.list = newlist
-}
-
-// Run executes all handlers in the list with a given request object.
-func (l *HandlerList) Run(r *Request) {
- for i, h := range l.list {
- h.Fn(r)
- item := HandlerListRunItem{
- Index: i, Handler: h, Request: r,
- }
- if l.AfterEachFn != nil && !l.AfterEachFn(item) {
- return
- }
- }
-}
-
-// HandlerListLogItem logs the request handler and the state of the
-// request's Error value. Always returns true to continue iterating
-// request handlers in a HandlerList.
-func HandlerListLogItem(item HandlerListRunItem) bool {
- if item.Request.Config.Logger == nil {
- return true
- }
- item.Request.Config.Logger.Log("DEBUG: RequestHandler",
- item.Index, item.Handler.Name, item.Request.Error)
-
- return true
-}
-
-// HandlerListStopOnError returns false to stop the HandlerList iterating
-// over request handlers if Request.Error is not nil. True otherwise
-// to continue iterating.
-func HandlerListStopOnError(item HandlerListRunItem) bool {
- return item.Request.Error == nil
-}
-
-// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
-// header. If the extra parameters are provided they will be added as metadata to the
-// name/version pair resulting in the following format.
-// "name/version (extra0; extra1; ...)"
-// The user agent part will be concatenated with this current request's user agent string.
-func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
- ua := fmt.Sprintf("%s/%s", name, version)
- if len(extra) > 0 {
- ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
- }
- return func(r *Request) {
- AddToUserAgent(r, ua)
- }
-}
-
-// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
-// The input string will be concatenated with the current request's user agent string.
-func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
- return func(r *Request) {
- AddToUserAgent(r, s)
- }
-}
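A minimal sketch of the HandlerList behaviour documented above: named and anonymous handlers plus an AfterEachFn that stops on the first error. The handler names are illustrative.
```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	l := request.HandlerList{}
	// Stop iterating as soon as a handler sets Request.Error.
	l.AfterEachFn = request.HandlerListStopOnError

	l.PushBackNamed(request.NamedHandler{Name: "trace", Fn: func(r *request.Request) {
		fmt.Println("trace handler ran")
	}})
	l.PushBack(func(r *request.Request) {
		fmt.Println("anonymous handler ran")
	})

	r := &request.Request{}
	l.Run(r)
	fmt.Println("handlers registered:", l.Len())
}
```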
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go
deleted file mode 100644
index b32a651..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package request_test
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-func TestHandlerList(t *testing.T) {
- s := ""
- r := &request.Request{}
- l := request.HandlerList{}
- l.PushBack(func(r *request.Request) {
- s += "a"
- r.Data = s
- })
- l.Run(r)
- assert.Equal(t, "a", s)
- assert.Equal(t, "a", r.Data)
-}
-
-func TestMultipleHandlers(t *testing.T) {
- r := &request.Request{}
- l := request.HandlerList{}
- l.PushBack(func(r *request.Request) { r.Data = nil })
- l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) })
- l.Run(r)
- if r.Data != nil {
- t.Error("Expected handler to execute")
- }
-}
-
-func TestNamedHandlers(t *testing.T) {
- l := request.HandlerList{}
- named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}}
- named2 := request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}}
- l.PushBackNamed(named)
- l.PushBackNamed(named)
- l.PushBackNamed(named2)
- l.PushBack(func(r *request.Request) {})
- assert.Equal(t, 4, l.Len())
- l.Remove(named)
- assert.Equal(t, 2, l.Len())
-}
-
-func TestLoggedHandlers(t *testing.T) {
- expectedHandlers := []string{"name1", "name2"}
- l := request.HandlerList{}
- loggedHandlers := []string{}
- l.AfterEachFn = request.HandlerListLogItem
- cfg := aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
- loggedHandlers = append(loggedHandlers, args[2].(string))
- })}
-
- named1 := request.NamedHandler{Name: "name1", Fn: func(r *request.Request) {}}
- named2 := request.NamedHandler{Name: "name2", Fn: func(r *request.Request) {}}
- l.PushBackNamed(named1)
- l.PushBackNamed(named2)
- l.Run(&request.Request{Config: cfg})
-
- assert.Equal(t, expectedHandlers, loggedHandlers)
-}
-
-func TestStopHandlers(t *testing.T) {
- l := request.HandlerList{}
- stopAt := 1
- l.AfterEachFn = func(item request.HandlerListRunItem) bool {
- return item.Index != stopAt
- }
-
- called := 0
- l.PushBackNamed(request.NamedHandler{Name: "name1", Fn: func(r *request.Request) {
- called++
- }})
- l.PushBackNamed(request.NamedHandler{Name: "name2", Fn: func(r *request.Request) {
- called++
- }})
- l.PushBackNamed(request.NamedHandler{Name: "name3", Fn: func(r *request.Request) {
- assert.Fail(t, "third handler should not be called")
- }})
- l.Run(&request.Request{})
-
- assert.Equal(t, 2, called, "Expect only two handlers to be called")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
deleted file mode 100644
index 79f7960..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package request
-
-import (
- "io"
- "net/http"
- "net/url"
-)
-
-func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
- req := new(http.Request)
- *req = *r
- req.URL = &url.URL{}
- *req.URL = *r.URL
- req.Body = body
-
- req.Header = http.Header{}
- for k, v := range r.Header {
- for _, vv := range v {
- req.Header.Add(k, vv)
- }
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go
deleted file mode 100644
index 4a4f855..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package request
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "net/url"
- "sync"
- "testing"
-)
-
-func TestRequestCopyRace(t *testing.T) {
- origReq := &http.Request{URL: &url.URL{}, Header: http.Header{}}
- origReq.Header.Set("Header", "OrigValue")
-
- var wg sync.WaitGroup
- for i := 0; i < 100; i++ {
- wg.Add(1)
- go func() {
- req := copyHTTPRequest(origReq, ioutil.NopCloser(&bytes.Buffer{}))
- req.Header.Set("Header", "Value")
- go func() {
- req2 := copyHTTPRequest(req, ioutil.NopCloser(&bytes.Buffer{}))
- req2.Header.Add("Header", "Value2")
- }()
- _ = req.Header.Get("Header")
- wg.Done()
- }()
- _ = origReq.Header.Get("Header")
- }
- origReq.Header.Get("Header")
-
- wg.Wait()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go
deleted file mode 100644
index fc0f46f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build go1.5
-
-package request_test
-
-import (
- "errors"
- "strings"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/mock"
- "github.com/stretchr/testify/assert"
-)
-
-func TestRequestCancelRetry(t *testing.T) {
- c := make(chan struct{})
-
- reqNum := 0
- s := mock.NewMockClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.Clear()
- s.Handlers.UnmarshalMeta.Clear()
- s.Handlers.UnmarshalError.Clear()
- s.Handlers.Send.PushFront(func(r *request.Request) {
- reqNum++
- r.Error = errors.New("net/http: canceled")
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- r.HTTPRequest.Cancel = c
- close(c)
-
- err := r.Send()
- assert.True(t, strings.Contains(err.Error(), "canceled"))
- assert.Equal(t, 1, reqNum)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
deleted file mode 100644
index 02f07f4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package request
-
-import (
- "io"
- "sync"
-)
-
-// offsetReader is a thread-safe io.ReadCloser to prevent racing
-// with retrying requests
-type offsetReader struct {
- buf io.ReadSeeker
- lock sync.Mutex
- closed bool
-}
-
-func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
- reader := &offsetReader{}
- buf.Seek(offset, 0)
-
- reader.buf = buf
- return reader
-}
-
-// Close will close the instance of the offset reader's access to
-// the underlying io.ReadSeeker.
-func (o *offsetReader) Close() error {
- o.lock.Lock()
- defer o.lock.Unlock()
- o.closed = true
- return nil
-}
-
-// Read is a thread-safe read of the underlying io.ReadSeeker
-func (o *offsetReader) Read(p []byte) (int, error) {
- o.lock.Lock()
- defer o.lock.Unlock()
-
- if o.closed {
- return 0, io.EOF
- }
-
- return o.buf.Read(p)
-}
-
-// Seek is a thread-safe seeking operation.
-func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
- o.lock.Lock()
- defer o.lock.Unlock()
-
- return o.buf.Seek(offset, whence)
-}
-
-// CloseAndCopy closes this offsetReader and returns a new one wrapping the
-// same underlying buffer, seeked to the given offset.
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
- o.Close()
- return newOffsetReader(o.buf, offset)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go
deleted file mode 100644
index 01856e3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package request
-
-import (
- "bytes"
- "io"
- "math/rand"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestOffsetReaderRead(t *testing.T) {
- buf := []byte("testData")
- reader := &offsetReader{buf: bytes.NewReader(buf)}
-
- tempBuf := make([]byte, len(buf))
-
- n, err := reader.Read(tempBuf)
-
- assert.Equal(t, n, len(buf))
- assert.Nil(t, err)
- assert.Equal(t, buf, tempBuf)
-}
-
-func TestOffsetReaderSeek(t *testing.T) {
- buf := []byte("testData")
- reader := newOffsetReader(bytes.NewReader(buf), 0)
-
- orig, err := reader.Seek(0, 1)
- assert.NoError(t, err)
- assert.Equal(t, int64(0), orig)
-
- n, err := reader.Seek(0, 2)
- assert.NoError(t, err)
- assert.Equal(t, int64(len(buf)), n)
-
- n, err = reader.Seek(orig, 0)
- assert.NoError(t, err)
- assert.Equal(t, int64(0), n)
-}
-
-func TestOffsetReaderClose(t *testing.T) {
- buf := []byte("testData")
- reader := &offsetReader{buf: bytes.NewReader(buf)}
-
- err := reader.Close()
- assert.Nil(t, err)
-
- tempBuf := make([]byte, len(buf))
- n, err := reader.Read(tempBuf)
- assert.Equal(t, n, 0)
- assert.Equal(t, err, io.EOF)
-}
-
-func TestOffsetReaderCloseAndCopy(t *testing.T) {
- buf := []byte("testData")
- tempBuf := make([]byte, len(buf))
- reader := &offsetReader{buf: bytes.NewReader(buf)}
-
- newReader := reader.CloseAndCopy(0)
-
- n, err := reader.Read(tempBuf)
- assert.Equal(t, n, 0)
- assert.Equal(t, err, io.EOF)
-
- n, err = newReader.Read(tempBuf)
- assert.Equal(t, n, len(buf))
- assert.Nil(t, err)
- assert.Equal(t, buf, tempBuf)
-}
-
-func TestOffsetReaderCloseAndCopyOffset(t *testing.T) {
- buf := []byte("testData")
- tempBuf := make([]byte, len(buf))
- reader := &offsetReader{buf: bytes.NewReader(buf)}
-
- newReader := reader.CloseAndCopy(4)
- n, err := newReader.Read(tempBuf)
- assert.Equal(t, n, len(buf)-4)
- assert.Nil(t, err)
-
- expected := []byte{'D', 'a', 't', 'a', 0, 0, 0, 0}
- assert.Equal(t, expected, tempBuf)
-}
-
-func TestOffsetReaderRace(t *testing.T) {
- wg := sync.WaitGroup{}
-
- f := func(reader *offsetReader) {
- defer wg.Done()
- var err error
- buf := make([]byte, 1)
- _, err = reader.Read(buf)
- for err != io.EOF {
- _, err = reader.Read(buf)
- }
-
- }
-
- closeFn := func(reader *offsetReader) {
- defer wg.Done()
- time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond)
- reader.Close()
- }
- for i := 0; i < 50; i++ {
- reader := &offsetReader{buf: bytes.NewReader(make([]byte, 1024*1024))}
- wg.Add(1)
- go f(reader)
- wg.Add(1)
- go closeFn(reader)
- }
- wg.Wait()
-}
-
-func BenchmarkOffsetReader(b *testing.B) {
- bufSize := 1024 * 1024 * 100
- buf := make([]byte, bufSize)
- reader := &offsetReader{buf: bytes.NewReader(buf)}
-
- tempBuf := make([]byte, 1024)
-
- for i := 0; i < b.N; i++ {
- reader.Read(tempBuf)
- }
-}
-
-func BenchmarkBytesReader(b *testing.B) {
- bufSize := 1024 * 1024 * 100
- buf := make([]byte, bufSize)
- reader := bytes.NewReader(buf)
-
- tempBuf := make([]byte, 1024)
-
- for i := 0; i < b.N; i++ {
- reader.Read(tempBuf)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
deleted file mode 100644
index 8ef9715..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package request
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "reflect"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
-)
-
-// A Request is the service request to be made.
-type Request struct {
- Config aws.Config
- ClientInfo metadata.ClientInfo
- Handlers Handlers
-
- Retryer
- Time time.Time
- ExpireTime time.Duration
- Operation *Operation
- HTTPRequest *http.Request
- HTTPResponse *http.Response
- Body io.ReadSeeker
- BodyStart int64 // offset from beginning of Body that the request body starts
- Params interface{}
- Error error
- Data interface{}
- RequestID string
- RetryCount int
- Retryable *bool
- RetryDelay time.Duration
- NotHoist bool
- SignedHeaderVals http.Header
- LastSignedAt time.Time
-
- built bool
-
-	// Need to persist an intermediate body between the input Body and HTTP
- // request body because the HTTP Client's transport can maintain a reference
- // to the HTTP request's body after the client has returned. This value is
- // safe to use concurrently and rewraps the input Body for each HTTP request.
- safeBody *offsetReader
-}
-
-// An Operation is the service API operation to be made.
-type Operation struct {
- Name string
- HTTPMethod string
- HTTPPath string
- *Paginator
-}
-
-// Paginator keeps track of pagination configuration for an API operation.
-type Paginator struct {
- InputTokens []string
- OutputTokens []string
- LimitToken string
- TruncationToken string
-}
-
-// New returns a new Request pointer for the service API
-// operation and parameters.
-//
-// Params is any value of input parameters to be the request payload.
-// Data is pointer value to an object which the request's response
-// payload will be deserialized to.
-func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
- retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
-
- method := operation.HTTPMethod
- if method == "" {
- method = "POST"
- }
-
- httpReq, _ := http.NewRequest(method, "", nil)
-
- var err error
- httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
- if err != nil {
- httpReq.URL = &url.URL{}
- err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
- }
-
- r := &Request{
- Config: cfg,
- ClientInfo: clientInfo,
- Handlers: handlers.Copy(),
-
- Retryer: retryer,
- Time: time.Now(),
- ExpireTime: 0,
- Operation: operation,
- HTTPRequest: httpReq,
- Body: nil,
- Params: params,
- Error: err,
- Data: data,
- }
- r.SetBufferBody([]byte{})
-
- return r
-}
-
-// WillRetry returns whether the request can be retried.
-func (r *Request) WillRetry() bool {
- return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
-}
-
-// ParamsFilled returns whether the request's parameters have been populated
-// and the parameters are valid. False is returned if no parameters are
-// provided or invalid.
-func (r *Request) ParamsFilled() bool {
- return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
-}
-
-// DataFilled returns true if the request's data for response deserialization
-// target has been set and is valid. False is returned if data is not
-// set, or is invalid.
-func (r *Request) DataFilled() bool {
- return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
-}
-
-// SetBufferBody will set the request's body bytes that will be sent to
-// the service API.
-func (r *Request) SetBufferBody(buf []byte) {
- r.SetReaderBody(bytes.NewReader(buf))
-}
-
-// SetStringBody sets the body of the request to be backed by a string.
-func (r *Request) SetStringBody(s string) {
- r.SetReaderBody(strings.NewReader(s))
-}
-
-// SetReaderBody will set the request's body reader.
-func (r *Request) SetReaderBody(reader io.ReadSeeker) {
- r.Body = reader
- r.ResetBody()
-}
-
-// Presign returns the request's signed URL. Error will be returned
-// if the signing fails.
-func (r *Request) Presign(expireTime time.Duration) (string, error) {
- r.ExpireTime = expireTime
- r.NotHoist = false
- r.Sign()
- if r.Error != nil {
- return "", r.Error
- }
- return r.HTTPRequest.URL.String(), nil
-}
-
-// PresignRequest behaves just like Presign, but hoists all headers and signs them.
-// Also returns the signed header values back to the user.
-func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
- r.ExpireTime = expireTime
- r.NotHoist = true
- r.Sign()
- if r.Error != nil {
- return "", nil, r.Error
- }
- return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
-}
-
-func debugLogReqError(r *Request, stage string, retrying bool, err error) {
- if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
- return
- }
-
- retryStr := "not retrying"
- if retrying {
- retryStr = "will retry"
- }
-
- r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
- stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
-}
-
-// Build will build the request's object so it can be signed and sent
-// to the service. Build will also validate all the request's parameters.
-// Any additional build Handlers set on this request will be run
-// in the order they were set.
-//
-// The request will only be built once. Multiple calls to build will have
-// no effect.
-//
-// If any Validate or Build errors occur the build will stop and the error
-// which occurred will be returned.
-func (r *Request) Build() error {
- if !r.built {
- r.Handlers.Validate.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Validate Request", false, r.Error)
- return r.Error
- }
- r.Handlers.Build.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Build Request", false, r.Error)
- return r.Error
- }
- r.built = true
- }
-
- return r.Error
-}
-
-// Sign will sign the request returning error if errors are encountered.
-//
-// Sign will build the request prior to signing. All Sign Handlers will
-// be executed in the order they were set.
-func (r *Request) Sign() error {
- r.Build()
- if r.Error != nil {
- debugLogReqError(r, "Build Request", false, r.Error)
- return r.Error
- }
-
- r.Handlers.Sign.Run(r)
- return r.Error
-}
-
-// ResetBody rewinds the request body back to its starting position, and
-// sets the HTTP Request body reference. When the body is read prior
-// to being sent in the HTTP request it will need to be rewound.
-func (r *Request) ResetBody() {
- if r.safeBody != nil {
- r.safeBody.Close()
- }
-
- r.safeBody = newOffsetReader(r.Body, r.BodyStart)
- r.HTTPRequest.Body = r.safeBody
-}
-
-// GetBody will return an io.ReadSeeker of the Request's underlying
-// input body with a concurrency safe wrapper.
-func (r *Request) GetBody() io.ReadSeeker {
- return r.safeBody
-}
-
-// Send will send the request returning error if errors are encountered.
-//
-// Send will sign the request prior to sending. All Send Handlers will
-// be executed in the order they were set.
-//
-// Canceling a request is non-deterministic. If a request has been canceled,
-// then the transport will choose, randomly, one of the state channels during
-// reads or getting the connection.
-//
-// readLoop() and getConn(req *Request, cm connectMethod)
-// https://github.com/golang/go/blob/master/src/net/http/transport.go
-//
-// Send will not close the request.Request's body.
-func (r *Request) Send() error {
- for {
- if aws.BoolValue(r.Retryable) {
- if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
- r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
- r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
- }
-
- // The previous http.Request will have a reference to the r.Body
- // and the HTTP Client's Transport may still be reading from
- // the request's body even though the Client's Do returned.
- r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
- r.ResetBody()
-
- // Closing response body to ensure that no response body is leaked
- // between retry attempts.
- if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
- r.HTTPResponse.Body.Close()
- }
- }
-
- r.Sign()
- if r.Error != nil {
- return r.Error
- }
-
- r.Retryable = nil
-
- r.Handlers.Send.Run(r)
- if r.Error != nil {
- if strings.Contains(r.Error.Error(), "net/http: request canceled") {
- return r.Error
- }
-
- err := r.Error
- r.Handlers.Retry.Run(r)
- r.Handlers.AfterRetry.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Send Request", false, r.Error)
- return r.Error
- }
- debugLogReqError(r, "Send Request", true, err)
- continue
- }
- r.Handlers.UnmarshalMeta.Run(r)
- r.Handlers.ValidateResponse.Run(r)
- if r.Error != nil {
- err := r.Error
- r.Handlers.UnmarshalError.Run(r)
- r.Handlers.Retry.Run(r)
- r.Handlers.AfterRetry.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Validate Response", false, r.Error)
- return r.Error
- }
- debugLogReqError(r, "Validate Response", true, err)
- continue
- }
-
- r.Handlers.Unmarshal.Run(r)
- if r.Error != nil {
- err := r.Error
- r.Handlers.Retry.Run(r)
- r.Handlers.AfterRetry.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Unmarshal Response", false, r.Error)
- return r.Error
- }
- debugLogReqError(r, "Unmarshal Response", true, err)
- continue
- }
-
- break
- }
-
- return nil
-}
-
-// AddToUserAgent adds the string to the end of the request's current user agent.
-func AddToUserAgent(r *Request, s string) {
- curUA := r.HTTPRequest.Header.Get("User-Agent")
- if len(curUA) > 0 {
- s = curUA + " " + s
- }
- r.HTTPRequest.Header.Set("User-Agent", s)
-}
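The user-agent helpers at the end of this file are normally attached to a client's Build handler list. A hedged sketch using SQS follows; the product name and version strings are illustrative assumptions.
```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sqs.New(sess)

	// Append "gohaqd/dev" to the User-Agent of every request this client builds.
	svc.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "gohaqd/user-agent",
		Fn:   request.MakeAddToUserAgentHandler("gohaqd", "dev"),
	})

	out, err := svc.ListQueues(&sqs.ListQueuesInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```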
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go
deleted file mode 100644
index afa0d94..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build go1.6
-
-package request_test
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/endpoints"
-)
-
-// Go versions 1.4 and 1.5 do not return an error. Version 1.5 will URL-encode
-// the URI while 1.4 will not.
-func TestRequestInvalidEndpoint(t *testing.T) {
- endpoint, _ := endpoints.NormalizeEndpoint("localhost:80 ", "test-service", "test-region", false, false)
- r := request.New(
- aws.Config{},
- metadata.ClientInfo{Endpoint: endpoint},
- defaults.Handlers(),
- client.DefaultRetryer{},
- &request.Operation{},
- nil,
- nil,
- )
-
- assert.Error(t, r.Error)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
deleted file mode 100644
index 2939ec4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package request
-
-import (
- "reflect"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
-)
-
-//type Paginater interface {
-// HasNextPage() bool
-// NextPage() *Request
-// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
-//}
-
-// HasNextPage returns true if this request has more pages of data available.
-func (r *Request) HasNextPage() bool {
- return len(r.nextPageTokens()) > 0
-}
-
-// nextPageTokens returns the tokens to use when asking for the next page of
-// data.
-func (r *Request) nextPageTokens() []interface{} {
- if r.Operation.Paginator == nil {
- return nil
- }
-
- if r.Operation.TruncationToken != "" {
- tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
- if len(tr) == 0 {
- return nil
- }
-
- switch v := tr[0].(type) {
- case *bool:
- if !aws.BoolValue(v) {
- return nil
- }
- case bool:
- if v == false {
- return nil
- }
- }
- }
-
- tokens := []interface{}{}
- tokenAdded := false
- for _, outToken := range r.Operation.OutputTokens {
- v, _ := awsutil.ValuesAtPath(r.Data, outToken)
- if len(v) > 0 {
- tokens = append(tokens, v[0])
- tokenAdded = true
- } else {
- tokens = append(tokens, nil)
- }
- }
- if !tokenAdded {
- return nil
- }
-
- return tokens
-}
-
-// NextPage returns a new Request that can be executed to return the next
-// page of result data. Call .Send() on this request to execute it.
-func (r *Request) NextPage() *Request {
- tokens := r.nextPageTokens()
- if len(tokens) == 0 {
- return nil
- }
-
- data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
- nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
- for i, intok := range nr.Operation.InputTokens {
- awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
- }
- return nr
-}
-
-// EachPage iterates over each page of a paginated request object. The fn
-// parameter should be a function with the following sample signature:
-//
-// func(page *T, lastPage bool) bool {
-// return true // return false to stop iterating
-// }
-//
-// Where "T" is the structure type matching the output structure of the given
-// operation. For example, a request object generated by
-// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
-// as the structure "T". The lastPage value represents whether the page is
-// the last page of data or not. This function should return true to keep
-// iterating or false to stop.
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
- for page := r; page != nil; page = page.NextPage() {
- if err := page.Send(); err != nil {
- return err
- }
- if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
- return page.Error
- }
- }
-
- return nil
-}
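EachPage is usually reached through a generated client request, as in the pagination tests that follow. A minimal sketch against DynamoDB ListTables; the region and limit are illustrative assumptions.
```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	db := dynamodb.New(sess)

	// EachPage builds the next request via NextPage() until no more page
	// tokens are returned; the callback returns false to stop early.
	req, _ := db.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(10)})
	err := req.EachPage(func(page interface{}, lastPage bool) bool {
		for _, name := range page.(*dynamodb.ListTablesOutput).TableNames {
			fmt.Println(aws.StringValue(name))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```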
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go
deleted file mode 100644
index 725ea25..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go
+++ /dev/null
@@ -1,455 +0,0 @@
-package request_test
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/route53"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-// Use DynamoDB methods for simplicity
-func TestPaginationQueryPage(t *testing.T) {
- db := dynamodb.New(unit.Session)
- tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false
-
- reqNum := 0
- resps := []*dynamodb.QueryOutput{
- {
- LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}},
- Count: aws.Int64(1),
- Items: []map[string]*dynamodb.AttributeValue{
- {
- "key": {S: aws.String("key1")},
- },
- },
- },
- {
- LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}},
- Count: aws.Int64(1),
- Items: []map[string]*dynamodb.AttributeValue{
- {
- "key": {S: aws.String("key2")},
- },
- },
- },
- {
- LastEvaluatedKey: map[string]*dynamodb.AttributeValue{},
- Count: aws.Int64(1),
- Items: []map[string]*dynamodb.AttributeValue{
- {
- "key": {S: aws.String("key3")},
- },
- },
- },
- }
-
- db.Handlers.Send.Clear() // mock sending
- db.Handlers.Unmarshal.Clear()
- db.Handlers.UnmarshalMeta.Clear()
- db.Handlers.ValidateResponse.Clear()
- db.Handlers.Build.PushBack(func(r *request.Request) {
- in := r.Params.(*dynamodb.QueryInput)
- if in == nil {
- tokens = append(tokens, nil)
- } else if len(in.ExclusiveStartKey) != 0 {
- tokens = append(tokens, in.ExclusiveStartKey)
- }
- })
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &dynamodb.QueryInput{
- Limit: aws.Int64(2),
- TableName: aws.String("tablename"),
- }
- err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool {
- numPages++
- for _, item := range p.Items {
- pages = append(pages, item)
- }
- if last {
- if gotToEnd {
- assert.Fail(t, "last=true happened twice")
- }
- gotToEnd = true
- }
- return true
- })
- assert.Nil(t, err)
-
- assert.Equal(t,
- []map[string]*dynamodb.AttributeValue{
- {"key": {S: aws.String("key1")}},
- {"key": {S: aws.String("key2")}},
- }, tokens)
- assert.Equal(t,
- []map[string]*dynamodb.AttributeValue{
- {"key": {S: aws.String("key1")}},
- {"key": {S: aws.String("key2")}},
- {"key": {S: aws.String("key3")}},
- }, pages)
- assert.Equal(t, 3, numPages)
- assert.True(t, gotToEnd)
- assert.Nil(t, params.ExclusiveStartKey)
-}
-
-// Use DynamoDB methods for simplicity
-func TestPagination(t *testing.T) {
- db := dynamodb.New(unit.Session)
- tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
-
- reqNum := 0
- resps := []*dynamodb.ListTablesOutput{
- {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
- {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
- {TableNames: []*string{aws.String("Table5")}},
- }
-
- db.Handlers.Send.Clear() // mock sending
- db.Handlers.Unmarshal.Clear()
- db.Handlers.UnmarshalMeta.Clear()
- db.Handlers.ValidateResponse.Clear()
- db.Handlers.Build.PushBack(func(r *request.Request) {
- in := r.Params.(*dynamodb.ListTablesInput)
- if in == nil {
- tokens = append(tokens, "")
- } else if in.ExclusiveStartTableName != nil {
- tokens = append(tokens, *in.ExclusiveStartTableName)
- }
- })
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
- err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
- numPages++
- for _, t := range p.TableNames {
- pages = append(pages, *t)
- }
- if last {
- if gotToEnd {
- assert.Fail(t, "last=true happened twice")
- }
- gotToEnd = true
- }
- return true
- })
-
- assert.Equal(t, []string{"Table2", "Table4"}, tokens)
- assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
- assert.Equal(t, 3, numPages)
- assert.True(t, gotToEnd)
- assert.Nil(t, err)
- assert.Nil(t, params.ExclusiveStartTableName)
-}
-
-// Use DynamoDB methods for simplicity
-func TestPaginationEachPage(t *testing.T) {
- db := dynamodb.New(unit.Session)
- tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
-
- reqNum := 0
- resps := []*dynamodb.ListTablesOutput{
- {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
- {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
- {TableNames: []*string{aws.String("Table5")}},
- }
-
- db.Handlers.Send.Clear() // mock sending
- db.Handlers.Unmarshal.Clear()
- db.Handlers.UnmarshalMeta.Clear()
- db.Handlers.ValidateResponse.Clear()
- db.Handlers.Build.PushBack(func(r *request.Request) {
- in := r.Params.(*dynamodb.ListTablesInput)
- if in == nil {
- tokens = append(tokens, "")
- } else if in.ExclusiveStartTableName != nil {
- tokens = append(tokens, *in.ExclusiveStartTableName)
- }
- })
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
- req, _ := db.ListTablesRequest(params)
- err := req.EachPage(func(p interface{}, last bool) bool {
- numPages++
- for _, t := range p.(*dynamodb.ListTablesOutput).TableNames {
- pages = append(pages, *t)
- }
- if last {
- if gotToEnd {
- assert.Fail(t, "last=true happened twice")
- }
- gotToEnd = true
- }
-
- return true
- })
-
- assert.Equal(t, []string{"Table2", "Table4"}, tokens)
- assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
- assert.Equal(t, 3, numPages)
- assert.True(t, gotToEnd)
- assert.Nil(t, err)
-}
-
-// Use DynamoDB methods for simplicity
-func TestPaginationEarlyExit(t *testing.T) {
- db := dynamodb.New(unit.Session)
- numPages, gotToEnd := 0, false
-
- reqNum := 0
- resps := []*dynamodb.ListTablesOutput{
- {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
- {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
- {TableNames: []*string{aws.String("Table5")}},
- }
-
- db.Handlers.Send.Clear() // mock sending
- db.Handlers.Unmarshal.Clear()
- db.Handlers.UnmarshalMeta.Clear()
- db.Handlers.ValidateResponse.Clear()
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
- err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
- numPages++
- if numPages == 2 {
- return false
- }
- if last {
- if gotToEnd {
- assert.Fail(t, "last=true happened twice")
- }
- gotToEnd = true
- }
- return true
- })
-
- assert.Equal(t, 2, numPages)
- assert.False(t, gotToEnd)
- assert.Nil(t, err)
-}
-
-func TestSkipPagination(t *testing.T) {
- client := s3.New(unit.Session)
- client.Handlers.Send.Clear() // mock sending
- client.Handlers.Unmarshal.Clear()
- client.Handlers.UnmarshalMeta.Clear()
- client.Handlers.ValidateResponse.Clear()
- client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = &s3.HeadBucketOutput{}
- })
-
- req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})
-
- numPages, gotToEnd := 0, false
- req.EachPage(func(p interface{}, last bool) bool {
- numPages++
- if last {
- gotToEnd = true
- }
- return true
- })
- assert.Equal(t, 1, numPages)
- assert.True(t, gotToEnd)
-}
-
-// Use S3 for simplicity
-func TestPaginationTruncation(t *testing.T) {
- client := s3.New(unit.Session)
-
- reqNum := 0
- resps := []*s3.ListObjectsOutput{
- {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
- {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
- {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
- {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
- }
-
- client.Handlers.Send.Clear() // mock sending
- client.Handlers.Unmarshal.Clear()
- client.Handlers.UnmarshalMeta.Clear()
- client.Handlers.ValidateResponse.Clear()
- client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}
-
- results := []string{}
- err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
- results = append(results, *p.Contents[0].Key)
- return true
- })
-
- assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
- assert.Nil(t, err)
-
- // Try again without truncation token at all
- reqNum = 0
- resps[1].IsTruncated = nil
- resps[2].IsTruncated = aws.Bool(true)
- results = []string{}
- err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
- results = append(results, *p.Contents[0].Key)
- return true
- })
-
- assert.Equal(t, []string{"Key1", "Key2"}, results)
- assert.Nil(t, err)
-}
-
-func TestPaginationNilToken(t *testing.T) {
- client := route53.New(unit.Session)
-
- reqNum := 0
- resps := []*route53.ListResourceRecordSetsOutput{
- {
- ResourceRecordSets: []*route53.ResourceRecordSet{
- {Name: aws.String("first.example.com.")},
- },
- IsTruncated: aws.Bool(true),
- NextRecordName: aws.String("second.example.com."),
- NextRecordType: aws.String("MX"),
- NextRecordIdentifier: aws.String("second"),
- MaxItems: aws.String("1"),
- },
- {
- ResourceRecordSets: []*route53.ResourceRecordSet{
- {Name: aws.String("second.example.com.")},
- },
- IsTruncated: aws.Bool(true),
- NextRecordName: aws.String("third.example.com."),
- NextRecordType: aws.String("MX"),
- MaxItems: aws.String("1"),
- },
- {
- ResourceRecordSets: []*route53.ResourceRecordSet{
- {Name: aws.String("third.example.com.")},
- },
- IsTruncated: aws.Bool(false),
- MaxItems: aws.String("1"),
- },
- }
- client.Handlers.Send.Clear() // mock sending
- client.Handlers.Unmarshal.Clear()
- client.Handlers.UnmarshalMeta.Clear()
- client.Handlers.ValidateResponse.Clear()
-
- idents := []string{}
- client.Handlers.Build.PushBack(func(r *request.Request) {
- p := r.Params.(*route53.ListResourceRecordSetsInput)
- idents = append(idents, aws.StringValue(p.StartRecordIdentifier))
-
- })
- client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = resps[reqNum]
- reqNum++
- })
-
- params := &route53.ListResourceRecordSetsInput{
- HostedZoneId: aws.String("id-zone"),
- }
-
- results := []string{}
- err := client.ListResourceRecordSetsPages(params, func(p *route53.ListResourceRecordSetsOutput, last bool) bool {
- results = append(results, *p.ResourceRecordSets[0].Name)
- return true
- })
-
- assert.NoError(t, err)
- assert.Equal(t, []string{"", "second", ""}, idents)
- assert.Equal(t, []string{"first.example.com.", "second.example.com.", "third.example.com."}, results)
-}
-
-// Benchmarks
-var benchResps = []*dynamodb.ListTablesOutput{
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
- {TableNames: []*string{aws.String("TABLE")}},
-}
-
-var benchDb = func() *dynamodb.DynamoDB {
- db := dynamodb.New(unit.Session)
- db.Handlers.Send.Clear() // mock sending
- db.Handlers.Unmarshal.Clear()
- db.Handlers.UnmarshalMeta.Clear()
- db.Handlers.ValidateResponse.Clear()
- return db
-}
-
-func BenchmarkCodegenIterator(b *testing.B) {
- reqNum := 0
- db := benchDb()
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = benchResps[reqNum]
- reqNum++
- })
-
- input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
- iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error {
- page, _ := db.ListTablesRequest(input)
- for ; page != nil; page = page.NextPage() {
- page.Send()
- out := page.Data.(*dynamodb.ListTablesOutput)
- if result := fn(out, !page.HasNextPage()); page.Error != nil || !result {
- return page.Error
- }
- }
- return nil
- }
-
- for i := 0; i < b.N; i++ {
- reqNum = 0
- iter(func(p *dynamodb.ListTablesOutput, last bool) bool {
- return true
- })
- }
-}
-
-func BenchmarkEachPageIterator(b *testing.B) {
- reqNum := 0
- db := benchDb()
- db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
- r.Data = benchResps[reqNum]
- reqNum++
- })
-
- input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
- for i := 0; i < b.N; i++ {
- reqNum = 0
- req, _ := db.ListTablesRequest(input)
- req.EachPage(func(p interface{}, last bool) bool {
- return true
- })
- }
-}
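
The benchmarks above compare the codegen-style iterator against request.EachPage using mocked DynamoDB pages. Outside of tests, the same pagination surface is exposed through the generated *Pages methods; below is a minimal sketch, assuming a real session whose credentials come from the usual configuration chain, with the region value purely illustrative.

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Credentials are assumed to come from the environment or shared config;
	// "us-east-1" is only an example region.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	db := dynamodb.New(sess)

	// Limit of 2 mirrors the page size used by the benchmarks above.
	input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
	err := db.ListTablesPages(input, func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
		for _, name := range page.TableNames {
			fmt.Println(aws.StringValue(name))
		}
		return true // returning false stops pagination early
	})
	if err != nil {
		log.Fatal(err)
	}
}
```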
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go
deleted file mode 100644
index 16bdd61..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go
+++ /dev/null
@@ -1,380 +0,0 @@
-package request_test
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "runtime"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting"
-)
-
-type testData struct {
- Data string
-}
-
-func body(str string) io.ReadCloser {
- return ioutil.NopCloser(bytes.NewReader([]byte(str)))
-}
-
-func unmarshal(req *request.Request) {
- defer req.HTTPResponse.Body.Close()
- if req.Data != nil {
- json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
- }
- return
-}
-
-func unmarshalError(req *request.Request) {
- bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
- if err != nil {
- req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
- return
- }
- if len(bodyBytes) == 0 {
- req.Error = awserr.NewRequestFailure(
- awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
- req.HTTPResponse.StatusCode,
- "",
- )
- return
- }
- var jsonErr jsonErrorResponse
- if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
- req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
- return
- }
- req.Error = awserr.NewRequestFailure(
- awserr.New(jsonErr.Code, jsonErr.Message, nil),
- req.HTTPResponse.StatusCode,
- "",
- )
-}
-
-type jsonErrorResponse struct {
- Code string `json:"__type"`
- Message string `json:"message"`
-}
-
-// test that retries occur for 5xx status codes
-func TestRequestRecoverRetry5xx(t *testing.T) {
- reqNum := 0
- reqs := []http.Response{
- {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- {StatusCode: 200, Body: body(`{"data":"valid"}`)},
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &reqs[reqNum]
- reqNum++
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.Nil(t, err)
- assert.Equal(t, 2, int(r.RetryCount))
- assert.Equal(t, "valid", out.Data)
-}
-
-// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
-func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
- reqNum := 0
- reqs := []http.Response{
- {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
- {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
- {StatusCode: 200, Body: body(`{"data":"valid"}`)},
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &reqs[reqNum]
- reqNum++
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.Nil(t, err)
- assert.Equal(t, 2, int(r.RetryCount))
- assert.Equal(t, "valid", out.Data)
-}
-
-// test that retries don't occur for 4xx status codes with a response type that can't be retried
-func TestRequest4xxUnretryable(t *testing.T) {
- s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.NotNil(t, err)
- if e, ok := err.(awserr.RequestFailure); ok {
- assert.Equal(t, 401, e.StatusCode())
- } else {
- assert.Fail(t, "Expected error to be a service failure")
- }
- assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code())
- assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message())
- assert.Equal(t, 0, int(r.RetryCount))
-}
-
-func TestRequestExhaustRetries(t *testing.T) {
- delays := []time.Duration{}
- sleepDelay := func(delay time.Duration) {
- delays = append(delays, delay)
- }
-
- reqNum := 0
- reqs := []http.Response{
- {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &reqs[reqNum]
- reqNum++
- })
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
- err := r.Send()
- assert.NotNil(t, err)
- if e, ok := err.(awserr.RequestFailure); ok {
- assert.Equal(t, 500, e.StatusCode())
- } else {
- assert.Fail(t, "Expected error to be a service failure")
- }
- assert.Equal(t, "UnknownError", err.(awserr.Error).Code())
- assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
- assert.Equal(t, 3, int(r.RetryCount))
-
- expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}}
- for i, v := range delays {
- min := expectDelays[i].min * time.Millisecond
- max := expectDelays[i].max * time.Millisecond
- assert.True(t, min <= v && v <= max,
- "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max)
- }
-}
-
-// test that the request is retried after the credentials are expired.
-func TestRequestRecoverExpiredCreds(t *testing.T) {
- reqNum := 0
- reqs := []http.Response{
- {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
- {StatusCode: 200, Body: body(`{"data":"valid"}`)},
- }
-
- s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
-
- credExpiredBeforeRetry := false
- credExpiredAfterRetry := false
-
- s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
- credExpiredAfterRetry = r.Config.Credentials.IsExpired()
- })
-
- s.Handlers.Sign.Clear()
- s.Handlers.Sign.PushBack(func(r *request.Request) {
- r.Config.Credentials.Get()
- })
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &reqs[reqNum]
- reqNum++
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.Nil(t, err)
-
- assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check")
- assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check")
- assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery")
-
- assert.Equal(t, 1, int(r.RetryCount))
- assert.Equal(t, "valid", out.Data)
-}
-
-func TestMakeAddtoUserAgentHandler(t *testing.T) {
- fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2")
- r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
- r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
- fn(r)
-
- assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"))
-}
-
-func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) {
- fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)")
- r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
- r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
- fn(r)
-
- assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"))
-}
-
-func TestRequestUserAgent(t *testing.T) {
- s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")})
- // s.Handlers.Validate.Clear()
-
- req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{})
- req.HTTPRequest.Header.Set("User-Agent", "foo/bar")
- assert.NoError(t, req.Build())
-
- expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)",
- aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
- assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent"))
-}
-
-func TestRequestThrottleRetries(t *testing.T) {
- delays := []time.Duration{}
- sleepDelay := func(delay time.Duration) {
- delays = append(delays, delay)
- }
-
- reqNum := 0
- reqs := []http.Response{
- {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
- {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)},
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = &reqs[reqNum]
- reqNum++
- })
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
- err := r.Send()
- assert.NotNil(t, err)
- if e, ok := err.(awserr.RequestFailure); ok {
- assert.Equal(t, 500, e.StatusCode())
- } else {
- assert.Fail(t, "Expected error to be a service failure")
- }
- assert.Equal(t, "Throttling", err.(awserr.Error).Code())
- assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
- assert.Equal(t, 3, int(r.RetryCount))
-
- expectDelays := []struct{ min, max time.Duration }{{500, 999}, {1000, 1998}, {2000, 3996}}
- for i, v := range delays {
- min := expectDelays[i].min * time.Millisecond
- max := expectDelays[i].max * time.Millisecond
- assert.True(t, min <= v && v <= max,
- "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max)
- }
-}
-
-// test that retries occur for request timeouts when response.Body can be nil
-func TestRequestRecoverTimeoutWithNilBody(t *testing.T) {
- reqNum := 0
- reqs := []*http.Response{
- {StatusCode: 0, Body: nil}, // body can be nil when requests time out
- {StatusCode: 200, Body: body(`{"data":"valid"}`)},
- }
- errors := []error{
- errors.New("timeout"), nil,
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.AfterRetry.Clear() // force retry on all errors
- s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
- if r.Error != nil {
- r.Error = nil
- r.Retryable = aws.Bool(true)
- r.RetryCount++
- }
- })
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = reqs[reqNum]
- r.Error = errors[reqNum]
- reqNum++
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.Nil(t, err)
- assert.Equal(t, 1, int(r.RetryCount))
- assert.Equal(t, "valid", out.Data)
-}
-
-func TestRequestRecoverTimeoutWithNilResponse(t *testing.T) {
- reqNum := 0
- reqs := []*http.Response{
- nil,
- {StatusCode: 200, Body: body(`{"data":"valid"}`)},
- }
- errors := []error{
- errors.New("timeout"),
- nil,
- }
-
- s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
- s.Handlers.Validate.Clear()
- s.Handlers.Unmarshal.PushBack(unmarshal)
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
- s.Handlers.AfterRetry.Clear() // force retry on all errors
- s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
- if r.Error != nil {
- r.Error = nil
- r.Retryable = aws.Bool(true)
- r.RetryCount++
- }
- })
- s.Handlers.Send.Clear() // mock sending
- s.Handlers.Send.PushBack(func(r *request.Request) {
- r.HTTPResponse = reqs[reqNum]
- r.Error = errors[reqNum]
- reqNum++
- })
- out := &testData{}
- r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
- err := r.Send()
- assert.Nil(t, err)
- assert.Equal(t, 1, int(r.RetryCount))
- assert.Equal(t, "valid", out.Data)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
deleted file mode 100644
index 8cc8b01..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package request
-
-import (
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// Retryer is an interface to control retry logic for a given service.
-// The default implementation used by most services is the service.DefaultRetryer
-// structure, which contains basic retry logic using exponential backoff.
-type Retryer interface {
- RetryRules(*Request) time.Duration
- ShouldRetry(*Request) bool
- MaxRetries() int
-}
-
-// WithRetryer sets a config Retryer value to the given Config returning it
-// for chaining.
-func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
- cfg.Retryer = retryer
- return cfg
-}
-
-// retryableCodes is a collection of service response codes which are retry-able
-// without any further action.
-var retryableCodes = map[string]struct{}{
- "RequestError": {},
- "RequestTimeout": {},
-}
-
-var throttleCodes = map[string]struct{}{
- "ProvisionedThroughputExceededException": {},
- "Throttling": {},
- "ThrottlingException": {},
- "RequestLimitExceeded": {},
- "RequestThrottled": {},
- "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
- "TooManyRequestsException": {}, // Lambda functions
-}
-
-// credsExpiredCodes is a collection of error codes which signify the credentials
-// need to be refreshed. Expired tokens require refreshing of credentials, and
-// resigning before the request can be retried.
-var credsExpiredCodes = map[string]struct{}{
- "ExpiredToken": {},
- "ExpiredTokenException": {},
- "RequestExpired": {}, // EC2 Only
-}
-
-func isCodeThrottle(code string) bool {
- _, ok := throttleCodes[code]
- return ok
-}
-
-func isCodeRetryable(code string) bool {
- if _, ok := retryableCodes[code]; ok {
- return true
- }
-
- return isCodeExpiredCreds(code)
-}
-
-func isCodeExpiredCreds(code string) bool {
- _, ok := credsExpiredCodes[code]
- return ok
-}
-
-// IsErrorRetryable returns whether the error is retryable, based on its Code.
-// Returns false if the request has no Error set.
-func (r *Request) IsErrorRetryable() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
- return isCodeRetryable(err.Code())
- }
- }
- return false
-}
-
-// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set
-func (r *Request) IsErrorThrottle() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
- return isCodeThrottle(err.Code())
- }
- }
- return false
-}
-
-// IsErrorExpired returns whether the error code is a credential expiry error.
-// Returns false if the request has no Error set.
-func (r *Request) IsErrorExpired() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
- return isCodeExpiredCreds(err.Code())
- }
- }
- return false
-}
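
retryer.go defines the three-method Retryer interface plus the WithRetryer helper for wiring a custom implementation into an aws.Config. The sketch below is one possible constant-delay retryer built on the IsErrorRetryable and IsErrorThrottle helpers defined in the same file; the attempt count, delay, and SQS client are illustrative assumptions, not part of the SDK.

```
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

// fixedDelayRetryer retries up to maxRetries times, always waiting delay
// between attempts instead of using exponential backoff.
type fixedDelayRetryer struct {
	maxRetries int
	delay      time.Duration
}

func (r fixedDelayRetryer) RetryRules(*request.Request) time.Duration { return r.delay }

func (r fixedDelayRetryer) ShouldRetry(req *request.Request) bool {
	// Defer to the SDK's own classification of retryable and throttle errors.
	return req.IsErrorRetryable() || req.IsErrorThrottle()
}

func (r fixedDelayRetryer) MaxRetries() int { return r.maxRetries }

func main() {
	cfg := request.WithRetryer(aws.NewConfig(), fixedDelayRetryer{maxRetries: 3, delay: 200 * time.Millisecond})
	sess := session.Must(session.NewSession(cfg))
	_ = sqs.New(sess) // any service client built from this session inherits the retryer
}
```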
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go
deleted file mode 100644
index b1926e3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package request
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-func TestRequestThrottling(t *testing.T) {
- req := Request{}
-
- req.Error = awserr.New("Throttling", "", nil)
- assert.True(t, req.IsErrorThrottle())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
deleted file mode 100644
index 2520286..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package request
-
-import (
- "bytes"
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-const (
- // InvalidParameterErrCode is the error code for invalid parameters errors
- InvalidParameterErrCode = "InvalidParameter"
- // ParamRequiredErrCode is the error code for required parameter errors
- ParamRequiredErrCode = "ParamRequiredError"
- // ParamMinValueErrCode is the error code for fields with too low of a
- // number value.
- ParamMinValueErrCode = "ParamMinValueError"
- // ParamMinLenErrCode is the error code for fields without enough elements.
- ParamMinLenErrCode = "ParamMinLenError"
-)
-
-// Validator provides a way for types to perform validation logic on their
-// input values that external code can use to determine if a type's values
-// are valid.
-type Validator interface {
- Validate() error
-}
-
-// An ErrInvalidParams provides wrapping of invalid parameter errors found when
-// validating API operation input parameters.
-type ErrInvalidParams struct {
- // Context is the base context of the invalid parameter group.
- Context string
- errs []ErrInvalidParam
-}
-
-// Add adds a new invalid parameter error to the collection of invalid
-// parameters. The context of the invalid parameter will be updated to reflect
-// this collection.
-func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
- err.SetContext(e.Context)
- e.errs = append(e.errs, err)
-}
-
-// AddNested adds the invalid parameter errors from another ErrInvalidParams
-// value into this collection. The nested errors will have their nested context
-// updated and base context to reflect the merging.
-//
-// Use for nested validations errors.
-func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
- for _, err := range nested.errs {
- err.SetContext(e.Context)
- err.AddNestedContext(nestedCtx)
- e.errs = append(e.errs, err)
- }
-}
-
-// Len returns the number of invalid parameter errors
-func (e ErrInvalidParams) Len() int {
- return len(e.errs)
-}
-
-// Code returns the code of the error
-func (e ErrInvalidParams) Code() string {
- return InvalidParameterErrCode
-}
-
-// Message returns the message of the error
-func (e ErrInvalidParams) Message() string {
- return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
-}
-
-// Error returns the string formatted form of the invalid parameters.
-func (e ErrInvalidParams) Error() string {
- w := &bytes.Buffer{}
- fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
-
- for _, err := range e.errs {
- fmt.Fprintf(w, "- %s\n", err.Message())
- }
-
- return w.String()
-}
-
-// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
-func (e ErrInvalidParams) OrigErr() error {
- return awserr.NewBatchError(
- InvalidParameterErrCode, e.Message(), e.OrigErrs())
-}
-
-// OrigErrs returns a slice of the invalid parameters
-func (e ErrInvalidParams) OrigErrs() []error {
- errs := make([]error, len(e.errs))
- for i := 0; i < len(errs); i++ {
- errs[i] = e.errs[i]
- }
-
- return errs
-}
-
-// An ErrInvalidParam represents an invalid parameter error type.
-type ErrInvalidParam interface {
- awserr.Error
-
- // Field name the error occurred on.
- Field() string
-
- // SetContext updates the context of the error.
- SetContext(string)
-
- // AddNestedContext updates the error's context to include a nested level.
- AddNestedContext(string)
-}
-
-type errInvalidParam struct {
- context string
- nestedContext string
- field string
- code string
- msg string
-}
-
-// Code returns the error code for the type of invalid parameter.
-func (e *errInvalidParam) Code() string {
- return e.code
-}
-
-// Message returns the reason the parameter was invalid, and its context.
-func (e *errInvalidParam) Message() string {
- return fmt.Sprintf("%s, %s.", e.msg, e.Field())
-}
-
-// Error returns the string version of the invalid parameter error.
-func (e *errInvalidParam) Error() string {
- return fmt.Sprintf("%s: %s", e.code, e.Message())
-}
-
-// OrigErr returns nil. Implemented for the awserr.Error interface.
-func (e *errInvalidParam) OrigErr() error {
- return nil
-}
-
-// Field returns the field and the context in which the error occurred.
-func (e *errInvalidParam) Field() string {
- field := e.context
- if len(field) > 0 {
- field += "."
- }
- if len(e.nestedContext) > 0 {
- field += fmt.Sprintf("%s.", e.nestedContext)
- }
- field += e.field
-
- return field
-}
-
-// SetContext updates the base context of the error.
-func (e *errInvalidParam) SetContext(ctx string) {
- e.context = ctx
-}
-
-// AddNestedContext prepends a context to the field's path.
-func (e *errInvalidParam) AddNestedContext(ctx string) {
- if len(e.nestedContext) == 0 {
- e.nestedContext = ctx
- } else {
- e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
- }
-
-}
-
-// An ErrParamRequired represents a required parameter error.
-type ErrParamRequired struct {
- errInvalidParam
-}
-
-// NewErrParamRequired creates a new required parameter error.
-func NewErrParamRequired(field string) *ErrParamRequired {
- return &ErrParamRequired{
- errInvalidParam{
- code: ParamRequiredErrCode,
- field: field,
- msg: fmt.Sprintf("missing required field"),
- },
- }
-}
-
-// An ErrParamMinValue represents a minimum value parameter error.
-type ErrParamMinValue struct {
- errInvalidParam
- min float64
-}
-
-// NewErrParamMinValue creates a new minimum value parameter error.
-func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
- return &ErrParamMinValue{
- errInvalidParam: errInvalidParam{
- code: ParamMinValueErrCode,
- field: field,
- msg: fmt.Sprintf("minimum field value of %v", min),
- },
- min: min,
- }
-}
-
-// MinValue returns the field's required minimum value.
-//
-// float64 is returned for both int and float min values.
-func (e *ErrParamMinValue) MinValue() float64 {
- return e.min
-}
-
-// An ErrParamMinLen represents a minimum length parameter error.
-type ErrParamMinLen struct {
- errInvalidParam
- min int
-}
-
-// NewErrParamMinLen creates a new minimum length parameter error.
-func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
- return &ErrParamMinLen{
- errInvalidParam: errInvalidParam{
-			code:  ParamMinLenErrCode,
- field: field,
- msg: fmt.Sprintf("minimum field size of %v", min),
- },
- min: min,
- }
-}
-
-// MinLen returns the field's required minimum length.
-func (e *ErrParamMinLen) MinLen() int {
- return e.min
-}
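
validation.go supplies the ErrInvalidParams collection that generated Validate() methods populate before a request is built. A small sketch of how the collection aggregates and reports parameter errors follows; the field names are made up for illustration.

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// Mimics what a generated Validate() method does for an input struct.
	errs := request.ErrInvalidParams{Context: "ExampleInput"}
	errs.Add(request.NewErrParamRequired("QueueUrl"))
	errs.Add(request.NewErrParamMinLen("MessageBody", 1))

	if errs.Len() > 0 {
		// Prints one "InvalidParameter" header line followed by one line per field.
		fmt.Print(errs.Error())
	}
}
```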
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
deleted file mode 100644
index d3dc840..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
-Package session provides configuration for the SDK's service clients.
-
-Sessions can be shared across all service clients that share the same base
-configuration. The Session is built from the SDK's default configuration and
-request handlers.
-
-Sessions should be cached when possible, because creating a new Session will
-load all configuration values from the environment and config files each time
-the Session is created. Sharing the Session value across all of your service
-clients will ensure the configuration is loaded the fewest number of times possible.
-
-Concurrency
-
-Sessions are safe to use concurrently as long as the Session is not being
-modified. The SDK will not modify the Session once the Session has been created.
-Creating service clients concurrently from a shared Session is safe.
-
-Sessions from Shared Config
-
-Sessions can be created using the method above that will only load the
-additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
-Alternatively you can explicitly create a Session with shared config enabled.
-To do this you can use NewSessionWithOptions to configure how the Session will
-be created. Using the NewSessionWithOptions with SharedConfigState set to
-SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
-environment variable was set.
-
-Creating Sessions
-
-When creating Sessions optional aws.Config values can be passed in that will
-override the default, or loaded config values the Session is being created
-with. This allows you to provide additional, or case based, configuration
-as needed.
-
-By default NewSession will only load credentials from the shared credentials
-file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
-set to a truthy value the Session will be created from the configuration
-values from the shared config (~/.aws/config) and shared credentials
-(~/.aws/credentials) files. See the section Sessions from Shared Config for
-more information.
-
-Create a Session with the default config and request handlers, with credentials,
-region, and profile loaded from the environment and shared config automatically.
-Requires the AWS_PROFILE to be set, or "default" is used.
-
- // Create Session
- sess, err := session.NewSession()
-
- // Create a Session with a custom region
- sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
-
- // Create a S3 client instance from a session
- sess, err := session.NewSession()
- if err != nil {
- // Handle Session creation error
- }
- svc := s3.New(sess)
-
-Create Session With Option Overrides
-
-In addition to NewSession, Sessions can be created using NewSessionWithOptions.
-This func allows you to control and override how the Session will be created
-through code instead of being driven by environment variables only.
-
-Use NewSessionWithOptions when you want to provide the config profile, or
-override the shared config state (AWS_SDK_LOAD_CONFIG).
-
- // Equivalent to session.NewSession()
- sess, err := session.NewSessionWithOptions(session.Options{})
-
- // Specify profile to load for the session's config
- sess, err := session.NewSessionWithOptions(session.Options{
- Profile: "profile_name",
- })
-
- // Specify profile for config and region for requests
- sess, err := session.NewSessionWithOptions(session.Options{
- Config: aws.Config{Region: aws.String("us-east-1")},
- Profile: "profile_name",
- })
-
- // Force enable Shared Config support
- sess, err := session.NewSessionWithOptions(session.Options{
- SharedConfigState: SharedConfigEnable,
- })
-
-Adding Handlers
-
-You can add handlers to a session for processing HTTP requests. All service
-clients that use the session inherit the handlers. For example, the following
-handler logs every request and its payload made by a service client:
-
- // Create a session, and add additional handlers for all service
- // clients created with the Session to inherit. Adds logging handler.
- sess, err := session.NewSession()
- sess.Handlers.Send.PushFront(func(r *request.Request) {
- // Log every request made and its payload
- logger.Println("Request: %s/%s, Payload: %s",
- r.ClientInfo.ServiceName, r.Operation, r.Params)
- })
-
-Deprecated "New" function
-
-The New session function has been deprecated because it does not provide a good
-way to return errors that occur when loading the configuration files and values.
-Because of this, NewSession was created so errors can be retrieved when
-creating a session fails.
-
-Shared Config Fields
-
-By default the SDK will only load the shared credentials file's (~/.aws/credentials)
-credentials values, and all other config is provided by the environment variables,
-SDK defaults, and user provided aws.Config values.
-
-If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
-option is used to create the Session, the full shared config values will be
-loaded. This includes credentials, region, and support for assume role. In
-addition the Session will load its configuration from both the shared config
-file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
-files have the same format.
-
-If both config files are present, the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config
-file (~/.aws/config).
-
-Credentials are the values the SDK should use for authenticating requests with
-AWS Services. Credentials loaded from a configuration file must include both
-aws_access_key_id and aws_secret_access_key, provided together in the same
-file, to be considered valid. The values will be ignored if they are not a
-complete group. aws_session_token is an optional field that can be provided if
-both of the other two fields are also provided.
-
- aws_access_key_id = AKID
- aws_secret_access_key = SECRET
- aws_session_token = TOKEN
-
-Assume Role values allow you to configure the SDK to assume an IAM role using
-a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK does not support
-assuming a role with an MFA token via the Session's constructor. You can use the
-stscreds.AssumeRoleProvider credentials provider to specify custom
-configuration and support for MFA.
-
- role_arn = arn:aws:iam:::role/
- source_profile = profile_with_creds
- external_id = 1234
- mfa_serial = not supported!
- role_session_name = session_name
-
-Region is the region the SDK should use for looking up AWS service endpoints
-and signing requests.
-
- region = us-east-1
-
-Environment Variables
-
-When a Session is created, several environment variables can be set to adjust
-how the SDK functions and what configuration data it loads when creating
-Sessions. All environment values are optional, but some values like credentials
-require multiple of the values to be set or the partial values will be ignored.
-All environment variable values are strings unless otherwise noted.
-
-Environment configuration values. If set, both Access Key ID and Secret Access
-Key must be provided. Session Token can optionally also be provided, but it is
-not required.
-
- # Access Key ID
- AWS_ACCESS_KEY_ID=AKID
- AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
-
- # Secret Access Key
- AWS_SECRET_ACCESS_KEY=SECRET
-	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
-
- # Session Token
- AWS_SESSION_TOKEN=TOKEN
-
-Region value will instruct the SDK where to make service API requests to. If it
-is not provided in the environment, the region must be provided before a service
-client request is made.
-
- AWS_REGION=us-east-1
-
- # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_REGION is not also set.
- AWS_DEFAULT_REGION=us-east-1
-
-Profile name the SDK should use when loading shared config from the
-configuration files. If not provided, "default" will be used as the profile name.
-
- AWS_PROFILE=my_profile
-
- # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_PROFILE is not also set.
- AWS_DEFAULT_PROFILE=my_profile
-
-SDK load config instructs the SDK to load the shared config in addition to
-shared credentials. This also expands the configuration loaded so the shared
-credentials will have parity with the shared config file. This also enables
-Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
-env values as well.
-
- AWS_SDK_LOAD_CONFIG=1
-
-Shared credentials file path can be set to instruct the SDK to use an alternative
-file for the shared credentials. If not set the file will be loaded from
-$HOME/.aws/credentials on Linux/Unix based systems, and
-%USERPROFILE%\.aws\credentials on Windows.
-
- AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
-
-Shared config file path can be set to instruct the SDK to use an alternative
-file for the shared config. If not set the file will be loaded from
-$HOME/.aws/config on Linux/Unix based systems, and
-%USERPROFILE%\.aws\config on Windows.
-
- AWS_CONFIG_FILE=$HOME/my_shared_config
-
-
-*/
-package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
deleted file mode 100644
index d2f0c84..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package session
-
-import (
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-// envConfig is a collection of environment values the SDK will read its
-// setup config from. All environment values are optional, but some values,
-// such as credentials, require multiple values to be complete or the values
-// will be ignored.
-type envConfig struct {
-	// Environment configuration values. If set, both Access Key ID and Secret Access
-	// Key must be provided. Session Token can optionally also be provided, but it is
-	// not required.
- //
- // # Access Key ID
- // AWS_ACCESS_KEY_ID=AKID
- // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
- //
- // # Secret Access Key
- // AWS_SECRET_ACCESS_KEY=SECRET
-	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
- //
- // # Session Token
- // AWS_SESSION_TOKEN=TOKEN
- Creds credentials.Value
-
-	// Region value will instruct the SDK where to make service API requests to. If it is
-	// not provided in the environment, the region must be provided before a service
- // client request is made.
- //
- // AWS_REGION=us-east-1
- //
- // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_REGION is not also set.
- // AWS_DEFAULT_REGION=us-east-1
- Region string
-
-	// Profile name the SDK should use when loading shared configuration from the
-	// shared configuration files. If not provided, "default" will be used as the
- // profile name.
- //
- // AWS_PROFILE=my_profile
- //
- // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_PROFILE is not also set.
- // AWS_DEFAULT_PROFILE=my_profile
- Profile string
-
- // SDK load config instructs the SDK to load the shared config in addition to
- // shared credentials. This also expands the configuration loaded from the shared
- // credentials to have parity with the shared config file. This also enables
- // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
- // env values as well.
- //
- // AWS_SDK_LOAD_CONFIG=1
- EnableSharedConfig bool
-
- // Shared credentials file path can be set to instruct the SDK to use an alternate
- // file for the shared credentials. If not set the file will be loaded from
- // $HOME/.aws/credentials on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\credentials on Windows.
- //
- // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
- SharedCredentialsFile string
-
- // Shared config file path can be set to instruct the SDK to use an alternate
- // file for the shared config. If not set the file will be loaded from
- // $HOME/.aws/config on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\config on Windows.
- //
- // AWS_CONFIG_FILE=$HOME/my_shared_config
- SharedConfigFile string
-}
-
-var (
- credAccessEnvKey = []string{
- "AWS_ACCESS_KEY_ID",
- "AWS_ACCESS_KEY",
- }
- credSecretEnvKey = []string{
- "AWS_SECRET_ACCESS_KEY",
- "AWS_SECRET_KEY",
- }
- credSessionEnvKey = []string{
- "AWS_SESSION_TOKEN",
- }
-
- regionEnvKeys = []string{
- "AWS_REGION",
- "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
- profileEnvKeys = []string{
- "AWS_PROFILE",
- "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
-)
-
-// loadEnvConfig retrieves the SDK's environment configuration.
-// See `envConfig` for the values that will be retrieved.
-//
-// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
-// the shared SDK config will be loaded in addition to the SDK's specific
-// configuration values.
-func loadEnvConfig() envConfig {
- enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
- return envConfigLoad(enableSharedConfig)
-}
-
-// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
-// SDK shared config. See `envConfig` for the values that will be retrieved.
-//
-// Loads the shared configuration in addition to the SDK's specific configuration.
-// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
-// environment variable is set.
-func loadSharedEnvConfig() envConfig {
- return envConfigLoad(true)
-}
-
-func envConfigLoad(enableSharedConfig bool) envConfig {
- cfg := envConfig{}
-
- cfg.EnableSharedConfig = enableSharedConfig
-
- setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
- setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
- setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
-
- // Require logical grouping of credentials
- if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
- cfg.Creds = credentials.Value{}
- } else {
- cfg.Creds.ProviderName = "EnvConfigCredentials"
- }
-
- regionKeys := regionEnvKeys
- profileKeys := profileEnvKeys
- if !cfg.EnableSharedConfig {
- regionKeys = regionKeys[:1]
- profileKeys = profileKeys[:1]
- }
-
- setFromEnvVal(&cfg.Region, regionKeys)
- setFromEnvVal(&cfg.Profile, profileKeys)
-
- cfg.SharedCredentialsFile = sharedCredentialsFilename()
- cfg.SharedConfigFile = sharedConfigFilename()
-
- return cfg
-}
-
-func setFromEnvVal(dst *string, keys []string) {
- for _, k := range keys {
- if v := os.Getenv(k); len(v) > 0 {
- *dst = v
- break
- }
- }
-}
-
-func sharedCredentialsFilename() string {
- if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
- return name
- }
-
- return filepath.Join(userHomeDir(), ".aws", "credentials")
-}
-
-func sharedConfigFilename() string {
- if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
- return name
- }
-
- return filepath.Join(userHomeDir(), ".aws", "config")
-}
-
-func userHomeDir() string {
- homeDir := os.Getenv("HOME") // *nix
- if len(homeDir) == 0 { // windows
- homeDir = os.Getenv("USERPROFILE")
- }
-
- return homeDir
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config_test.go
deleted file mode 100644
index 5a6aa7d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config_test.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package session
-
-import (
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/stretchr/testify/assert"
-)
-
-func TestLoadEnvConfig_Creds(t *testing.T) {
- env := stashEnv()
- defer popEnv(env)
-
- cases := []struct {
- Env map[string]string
- Val credentials.Value
- }{
- {
- Env: map[string]string{
- "AWS_ACCESS_KEY": "AKID",
- },
- Val: credentials.Value{},
- },
- {
- Env: map[string]string{
- "AWS_ACCESS_KEY_ID": "AKID",
- },
- Val: credentials.Value{},
- },
- {
- Env: map[string]string{
- "AWS_SECRET_KEY": "SECRET",
- },
- Val: credentials.Value{},
- },
- {
- Env: map[string]string{
- "AWS_SECRET_ACCESS_KEY": "SECRET",
- },
- Val: credentials.Value{},
- },
- {
- Env: map[string]string{
- "AWS_ACCESS_KEY_ID": "AKID",
- "AWS_SECRET_ACCESS_KEY": "SECRET",
- },
- Val: credentials.Value{
- AccessKeyID: "AKID", SecretAccessKey: "SECRET",
- ProviderName: "EnvConfigCredentials",
- },
- },
- {
- Env: map[string]string{
- "AWS_ACCESS_KEY": "AKID",
- "AWS_SECRET_KEY": "SECRET",
- },
- Val: credentials.Value{
- AccessKeyID: "AKID", SecretAccessKey: "SECRET",
- ProviderName: "EnvConfigCredentials",
- },
- },
- {
- Env: map[string]string{
- "AWS_ACCESS_KEY": "AKID",
- "AWS_SECRET_KEY": "SECRET",
- "AWS_SESSION_TOKEN": "TOKEN",
- },
- Val: credentials.Value{
- AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "TOKEN",
- ProviderName: "EnvConfigCredentials",
- },
- },
- }
-
- for _, c := range cases {
- os.Clearenv()
-
- for k, v := range c.Env {
- os.Setenv(k, v)
- }
-
- cfg := loadEnvConfig()
- assert.Equal(t, c.Val, cfg.Creds)
- }
-}
-
-func TestLoadEnvConfig(t *testing.T) {
- env := stashEnv()
- defer popEnv(env)
-
- cases := []struct {
- Env map[string]string
- Region, Profile string
- UseSharedConfigCall bool
- }{
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_PROFILE": "profile",
- },
- Region: "region", Profile: "profile",
- },
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_PROFILE": "profile",
- "AWS_DEFAULT_PROFILE": "default_profile",
- },
- Region: "region", Profile: "profile",
- },
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_PROFILE": "profile",
- "AWS_DEFAULT_PROFILE": "default_profile",
- "AWS_SDK_LOAD_CONFIG": "1",
- },
- Region: "region", Profile: "profile",
- },
- {
- Env: map[string]string{
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_DEFAULT_PROFILE": "default_profile",
- },
- },
- {
- Env: map[string]string{
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_DEFAULT_PROFILE": "default_profile",
- "AWS_SDK_LOAD_CONFIG": "1",
- },
- Region: "default_region", Profile: "default_profile",
- },
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_PROFILE": "profile",
- },
- Region: "region", Profile: "profile",
- UseSharedConfigCall: true,
- },
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_PROFILE": "profile",
- "AWS_DEFAULT_PROFILE": "default_profile",
- },
- Region: "region", Profile: "profile",
- UseSharedConfigCall: true,
- },
- {
- Env: map[string]string{
- "AWS_REGION": "region",
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_PROFILE": "profile",
- "AWS_DEFAULT_PROFILE": "default_profile",
- "AWS_SDK_LOAD_CONFIG": "1",
- },
- Region: "region", Profile: "profile",
- UseSharedConfigCall: true,
- },
- {
- Env: map[string]string{
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_DEFAULT_PROFILE": "default_profile",
- },
- Region: "default_region", Profile: "default_profile",
- UseSharedConfigCall: true,
- },
- {
- Env: map[string]string{
- "AWS_DEFAULT_REGION": "default_region",
- "AWS_DEFAULT_PROFILE": "default_profile",
- "AWS_SDK_LOAD_CONFIG": "1",
- },
- Region: "default_region", Profile: "default_profile",
- UseSharedConfigCall: true,
- },
- }
-
- for _, c := range cases {
- os.Clearenv()
-
- for k, v := range c.Env {
- os.Setenv(k, v)
- }
-
- var cfg envConfig
- if c.UseSharedConfigCall {
- cfg = loadSharedEnvConfig()
- } else {
- cfg = loadEnvConfig()
- }
-
- assert.Equal(t, c.Region, cfg.Region)
- assert.Equal(t, c.Profile, cfg.Profile)
- }
-}
-
-func TestSharedCredsFilename(t *testing.T) {
- env := stashEnv()
- defer popEnv(env)
-
- os.Setenv("USERPROFILE", "profile_dir")
- expect := filepath.Join("profile_dir", ".aws", "credentials")
- name := sharedCredentialsFilename()
- assert.Equal(t, expect, name)
-
- os.Setenv("HOME", "home_dir")
- expect = filepath.Join("home_dir", ".aws", "credentials")
- name = sharedCredentialsFilename()
- assert.Equal(t, expect, name)
-
- expect = filepath.Join("path/to/credentials/file")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", expect)
- name = sharedCredentialsFilename()
- assert.Equal(t, expect, name)
-}
-
-func TestSharedConfigFilename(t *testing.T) {
- env := stashEnv()
- defer popEnv(env)
-
- os.Setenv("USERPROFILE", "profile_dir")
- expect := filepath.Join("profile_dir", ".aws", "config")
- name := sharedConfigFilename()
- assert.Equal(t, expect, name)
-
- os.Setenv("HOME", "home_dir")
- expect = filepath.Join("home_dir", ".aws", "config")
- name = sharedConfigFilename()
- assert.Equal(t, expect, name)
-
- expect = filepath.Join("path/to/config/file")
- os.Setenv("AWS_CONFIG_FILE", expect)
- name = sharedConfigFilename()
- assert.Equal(t, expect, name)
-}
-
-func TestSetEnvValue(t *testing.T) {
- env := stashEnv()
- defer popEnv(env)
-
- os.Setenv("empty_key", "")
- os.Setenv("second_key", "2")
- os.Setenv("third_key", "3")
-
- var dst string
- setFromEnvVal(&dst, []string{
- "empty_key", "first_key", "second_key", "third_key",
- })
-
- assert.Equal(t, "2", dst)
-}
-
-func stashEnv() []string {
- env := os.Environ()
- os.Clearenv()
-
- return env
-}
-
-func popEnv(env []string) {
- os.Clearenv()
-
- for _, e := range env {
- p := strings.SplitN(e, "=", 2)
- os.Setenv(p[0], p[1])
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
deleted file mode 100644
index 602f4e1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ /dev/null
@@ -1,393 +0,0 @@
-package session
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/endpoints"
-)
-
-// A Session provides a central location to create service clients from and
-// store configurations and request handlers for those services.
-//
-// Sessions are safe to create service clients concurrently, but it is not safe
-// to mutate the Session concurrently.
-//
-// The Session satisfies the service client's client.ClientConfigProvider.
-type Session struct {
- Config *aws.Config
- Handlers request.Handlers
-}
-
-// New creates a new instance of the handlers merging in the provided configs
-// on top of the SDK's default configurations. Once the Session is created it
-// can be mutated to modify the Config or Handlers. The Session is safe to be
-// read concurrently, but it should not be written to concurrently.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the
-// New method could now encounter an error when loading the configuration. When
-// the environment variable is set, and an error occurs, New will return a
-// session that will fail all requests reporting the error that occurred while
-// loading the session. Use NewSession to get the error when creating the
-// session.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded, in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file.
-//
-// Deprecated: Use NewSession functions to create sessions instead. NewSession
-// has the same functionality as New except an error can be returned when the
-// func is called instead of waiting to receive an error until a request is made.
-func New(cfgs ...*aws.Config) *Session {
- // load initial config from environment
- envCfg := loadEnvConfig()
-
- if envCfg.EnableSharedConfig {
- s, err := newSession(envCfg, cfgs...)
- if err != nil {
- // Old session.New expected all errors to be discovered when
- // a request is made, and would report the errors then. This
- // needs to be replicated if an error occurs while creating
- // the session.
- msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
-				"Use session.NewSession to handle errors occurring during session creation."
-
- // Session creation failed, need to report the error and prevent
- // any requests from succeeding.
- s = &Session{Config: defaults.Config()}
- s.Config.MergeIn(cfgs...)
- s.Config.Logger.Log("ERROR:", msg, "Error:", err)
- s.Handlers.Validate.PushBack(func(r *request.Request) {
- r.Error = err
- })
- }
- return s
- }
-
- return oldNewSession(cfgs...)
-}
-
-// NewSession returns a new Session created from SDK defaults, config files,
-// environment, and user provided config files. Once the Session is created
-// it can be mutated to modify the Config or Handlers. The Session is safe to
-// be read concurrently, but it should not be written to concurrently.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file. Enabling the Shared Config will also allow the Session
-// to be built with retrieving credentials with AssumeRole set in the config.
-//
-// See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created, such as specifying the
-// config profile, and controlling if shared config is enabled or not.
-func NewSession(cfgs ...*aws.Config) (*Session, error) {
- envCfg := loadEnvConfig()
-
- return newSession(envCfg, cfgs...)
-}
-
-// SharedConfigState provides the ability to optionally override the state
-// of the session's creation based on the shared config being enabled or
-// disabled.
-type SharedConfigState int
-
-const (
- // SharedConfigStateFromEnv does not override any state of the
- // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
- // SharedConfigState type.
- SharedConfigStateFromEnv SharedConfigState = iota
-
- // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
- // and disables the shared config functionality.
- SharedConfigDisable
-
- // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
- // and enables the shared config functionality.
- SharedConfigEnable
-)
-
-// Options provides the means to control how a Session is created and what
-// configuration values will be loaded.
-//
-type Options struct {
- // Provides config values for the SDK to use when creating service clients
-	// and making API requests to services. Any value set in this field
-	// will override the associated value provided by the SDK defaults,
-	// environment, or config files where relevant.
-	//
-	// If not set, configuration values from SDK defaults, environment, and
-	// config files will be used.
- Config aws.Config
-
- // Overrides the config profile the Session should be created from. If not
- // set the value of the environment variable will be loaded (AWS_PROFILE,
- // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
- //
- // If not set and environment variables are not set the "default"
- // (DefaultSharedConfigProfile) will be used as the profile to load the
- // session config from.
- Profile string
-
- // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
- // environment variable. By default a Session will be created using the
- // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
- //
- // Setting this value to SharedConfigEnable or SharedConfigDisable
- // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
- // and enable or disable the shared config functionality.
- SharedConfigState SharedConfigState
-}
-
-// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
-// environment, and user provided config files. This func uses the Options
-// values to configure how the Session is created.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file. Enabling the Shared Config will also allow the Session
-// to be built with retrieving credentials with AssumeRole set in the config.
-//
-// // Equivalent to session.New
-// sess, err := session.NewSessionWithOptions(session.Options{})
-//
-// // Specify profile to load for the session's config
-// sess, err := session.NewSessionWithOptions(session.Options{
-// Profile: "profile_name",
-// })
-//
-// // Specify profile for config and region for requests
-// sess, err := session.NewSessionWithOptions(session.Options{
-// Config: aws.Config{Region: aws.String("us-east-1")},
-// Profile: "profile_name",
-// })
-//
-// // Force enable Shared Config support
-// sess, err := session.NewSessionWithOptions(session.Options{
-// SharedConfigState: SharedConfigEnable,
-// })
-func NewSessionWithOptions(opts Options) (*Session, error) {
- var envCfg envConfig
- if opts.SharedConfigState == SharedConfigEnable {
- envCfg = loadSharedEnvConfig()
- } else {
- envCfg = loadEnvConfig()
- }
-
- if len(opts.Profile) > 0 {
- envCfg.Profile = opts.Profile
- }
-
- switch opts.SharedConfigState {
- case SharedConfigDisable:
- envCfg.EnableSharedConfig = false
- case SharedConfigEnable:
- envCfg.EnableSharedConfig = true
- }
-
- return newSession(envCfg, &opts.Config)
-}
-
-// Must is a helper function to ensure the Session is valid and there was no
-// error when calling a NewSession function.
-//
-// This helper is intended to be used in variable initialization to load the
-// Session and configuration at startup. Such as:
-//
-// var sess = session.Must(session.NewSession())
-func Must(sess *Session, err error) *Session {
- if err != nil {
- panic(err)
- }
-
- return sess
-}
-
-func oldNewSession(cfgs ...*aws.Config) *Session {
- cfg := defaults.Config()
- handlers := defaults.Handlers()
-
- // Apply the passed in configs so the configuration can be applied to the
- // default credential chain
- cfg.MergeIn(cfgs...)
- cfg.Credentials = defaults.CredChain(cfg, handlers)
-
- // Reapply any passed in configs to override credentials if set
- cfg.MergeIn(cfgs...)
-
- s := &Session{
- Config: cfg,
- Handlers: handlers,
- }
-
- initHandlers(s)
-
- return s
-}
-
-func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
- cfg := defaults.Config()
- handlers := defaults.Handlers()
-
- // Get a merged version of the user provided config to determine if
-	// credentials were set.
- userCfg := &aws.Config{}
- userCfg.MergeIn(cfgs...)
-
-	// Config files will be loaded in order, with later files overwriting
-	// previous config file values.
- cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
- if !envCfg.EnableSharedConfig {
- // The shared config file (~/.aws/config) is only loaded if instructed
- // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
- cfgFiles = cfgFiles[1:]
- }
-
- // Load additional config from file(s)
- sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
- if err != nil {
- return nil, err
- }
-
- mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
-
- s := &Session{
- Config: cfg,
- Handlers: handlers,
- }
-
- initHandlers(s)
-
- return s, nil
-}
-
-func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
- // Merge in user provided configuration
- cfg.MergeIn(userCfg)
-
- // Region if not already set by user
- if len(aws.StringValue(cfg.Region)) == 0 {
- if len(envCfg.Region) > 0 {
- cfg.WithRegion(envCfg.Region)
- } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
- cfg.WithRegion(sharedCfg.Region)
- }
- }
-
- // Configure credentials if not already set
- if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
- if len(envCfg.Creds.AccessKeyID) > 0 {
- cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
- envCfg.Creds,
- )
- } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
- cfgCp := *cfg
- cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
- sharedCfg.AssumeRoleSource.Creds,
- )
- cfg.Credentials = stscreds.NewCredentials(
- &Session{
- Config: &cfgCp,
- Handlers: handlers.Copy(),
- },
- sharedCfg.AssumeRole.RoleARN,
- func(opt *stscreds.AssumeRoleProvider) {
- opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
-
- if len(sharedCfg.AssumeRole.ExternalID) > 0 {
- opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
- }
-
- // MFA not supported
- },
- )
- } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
- cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
- sharedCfg.Creds,
- )
- } else {
- // Fallback to default credentials provider, include mock errors
- // for the credential chain so user can identify why credentials
- // failed to be retrieved.
- cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
- VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
- Providers: []credentials.Provider{
- &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
- &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
- defaults.RemoteCredProvider(*cfg, handlers),
- },
- })
- }
- }
-}
-
-type credProviderError struct {
- Err error
-}
-
-var emptyCreds = credentials.Value{}
-
-func (c credProviderError) Retrieve() (credentials.Value, error) {
- return credentials.Value{}, c.Err
-}
-func (c credProviderError) IsExpired() bool {
- return true
-}
-
-func initHandlers(s *Session) {
- // Add the Validate parameter handler if it is not disabled.
- s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
- if !aws.BoolValue(s.Config.DisableParamValidation) {
- s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
- }
-}
-
-// Copy creates and returns a copy of the current Session, copying the config
-// and handlers. If any additional configs are provided they will be merged
-// on top of the Session's copied config.
-//
-// // Create a copy of the current Session, configured for the us-west-2 region.
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
-func (s *Session) Copy(cfgs ...*aws.Config) *Session {
- newSession := &Session{
- Config: s.Config.Copy(cfgs...),
- Handlers: s.Handlers.Copy(),
- }
-
- initHandlers(newSession)
-
- return newSession
-}
-
-// ClientConfig satisfies the client.ConfigProvider interface and is used to
-// configure the service client instances. Passing the Session to the service
-// client's constructor (New) will use this method to configure the client.
-func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
- s = s.Copy(cfgs...)
- endpoint, signingRegion := endpoints.NormalizeEndpoint(
- aws.StringValue(s.Config.Endpoint),
- serviceName,
- aws.StringValue(s.Config.Region),
- aws.BoolValue(s.Config.DisableSSL),
- aws.BoolValue(s.Config.UseDualStack),
- )
-
- return client.Config{
- Config: s.Config,
- Handlers: s.Handlers,
- Endpoint: endpoint,
- SigningRegion: signingRegion,
- }
-}
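
Before moving on to the deleted tests, here is a short sketch of the Copy and ClientConfig methods removed above. It only uses calls shown in this file; the regions and the "sqs" service name are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}

	// Copy derives a new Session with copied config and handlers; the
	// original session keeps its region.
	west := sess.Copy(&aws.Config{Region: aws.String("us-west-2")})

	// ClientConfig is what service constructors call to resolve the
	// endpoint and signing region for a service name.
	cfg := west.ClientConfig("sqs")
	fmt.Println(cfg.Endpoint, cfg.SigningRegion)
}
```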
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go
deleted file mode 100644
index f8a8ae4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package session
-
-import (
- "bytes"
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-func TestNewDefaultSession(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- s := New(&aws.Config{Region: aws.String("region")})
-
- assert.Equal(t, "region", *s.Config.Region)
- assert.Equal(t, http.DefaultClient, s.Config.HTTPClient)
- assert.NotNil(t, s.Config.Logger)
- assert.Equal(t, aws.LogOff, *s.Config.LogLevel)
-}
-
-func TestNew_WithCustomCreds(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- customCreds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
- s := New(&aws.Config{Credentials: customCreds})
-
- assert.Equal(t, customCreds, s.Config.Credentials)
-}
-
-type mockLogger struct {
- *bytes.Buffer
-}
-
-func (w mockLogger) Log(args ...interface{}) {
- fmt.Fprintln(w, args...)
-}
-
-func TestNew_WithSessionLoadError(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
- os.Setenv("AWS_CONFIG_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "assume_role_invalid_source_profile")
-
- logger := bytes.Buffer{}
- s := New(&aws.Config{Logger: &mockLogger{&logger}})
-
- assert.NotNil(t, s)
-
- svc := s3.New(s)
- _, err := svc.ListBuckets(&s3.ListBucketsInput{})
-
- assert.Error(t, err)
- assert.Contains(t, logger.String(), "ERROR: failed to create session with AWS_SDK_LOAD_CONFIG enabled")
- assert.Contains(t, err.Error(), SharedConfigAssumeRoleError{
- RoleARN: "assume_role_invalid_source_profile_role_arn",
- }.Error())
-}
-
-func TestSessionCopy(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_REGION", "orig_region")
-
- s := Session{
- Config: defaults.Config(),
- Handlers: defaults.Handlers(),
- }
-
- newSess := s.Copy(&aws.Config{Region: aws.String("new_region")})
-
- assert.Equal(t, "orig_region", *s.Config.Region)
- assert.Equal(t, "new_region", *newSess.Config.Region)
-}
-
-func TestSessionClientConfig(t *testing.T) {
- s, err := NewSession(&aws.Config{Region: aws.String("orig_region")})
- assert.NoError(t, err)
-
- cfg := s.ClientConfig("s3", &aws.Config{Region: aws.String("us-west-2")})
-
- assert.Equal(t, "https://s3-us-west-2.amazonaws.com", cfg.Endpoint)
- assert.Empty(t, cfg.SigningRegion)
- assert.Equal(t, "us-west-2", *cfg.Config.Region)
-}
-
-func TestNewSession_NoCredentials(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- s, err := NewSession()
- assert.NoError(t, err)
-
- assert.NotNil(t, s.Config.Credentials)
- assert.NotEqual(t, credentials.AnonymousCredentials, s.Config.Credentials)
-}
-
-func TestNewSessionWithOptions_OverrideProfile(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "other_profile")
-
- s, err := NewSessionWithOptions(Options{
- Profile: "full_profile",
- })
- assert.NoError(t, err)
-
- assert.Equal(t, "full_profile_region", *s.Config.Region)
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, "full_profile_akid", creds.AccessKeyID)
- assert.Equal(t, "full_profile_secret", creds.SecretAccessKey)
- assert.Empty(t, creds.SessionToken)
- assert.Contains(t, creds.ProviderName, "SharedConfigCredentials")
-}
-
-func TestNewSessionWithOptions_OverrideSharedConfigEnable(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "0")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "full_profile")
-
- s, err := NewSessionWithOptions(Options{
- SharedConfigState: SharedConfigEnable,
- })
- assert.NoError(t, err)
-
- assert.Equal(t, "full_profile_region", *s.Config.Region)
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, "full_profile_akid", creds.AccessKeyID)
- assert.Equal(t, "full_profile_secret", creds.SecretAccessKey)
- assert.Empty(t, creds.SessionToken)
- assert.Contains(t, creds.ProviderName, "SharedConfigCredentials")
-}
-
-func TestNewSessionWithOptions_OverrideSharedConfigDisable(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "full_profile")
-
- s, err := NewSessionWithOptions(Options{
- SharedConfigState: SharedConfigDisable,
- })
- assert.NoError(t, err)
-
- assert.Empty(t, *s.Config.Region)
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, "full_profile_akid", creds.AccessKeyID)
- assert.Equal(t, "full_profile_secret", creds.SecretAccessKey)
- assert.Empty(t, creds.SessionToken)
- assert.Contains(t, creds.ProviderName, "SharedConfigCredentials")
-}
-
-func TestNewSessionWithOptions_Overrides(t *testing.T) {
- cases := []struct {
- InEnvs map[string]string
- InProfile string
- OutRegion string
- OutCreds credentials.Value
- }{
- {
- InEnvs: map[string]string{
- "AWS_SDK_LOAD_CONFIG": "0",
- "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename,
- "AWS_PROFILE": "other_profile",
- },
- InProfile: "full_profile",
- OutRegion: "full_profile_region",
- OutCreds: credentials.Value{
- AccessKeyID: "full_profile_akid",
- SecretAccessKey: "full_profile_secret",
- ProviderName: "SharedConfigCredentials",
- },
- },
- {
- InEnvs: map[string]string{
- "AWS_SDK_LOAD_CONFIG": "0",
- "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename,
- "AWS_REGION": "env_region",
- "AWS_ACCESS_KEY": "env_akid",
- "AWS_SECRET_ACCESS_KEY": "env_secret",
- "AWS_PROFILE": "other_profile",
- },
- InProfile: "full_profile",
- OutRegion: "env_region",
- OutCreds: credentials.Value{
- AccessKeyID: "env_akid",
- SecretAccessKey: "env_secret",
- ProviderName: "EnvConfigCredentials",
- },
- },
- {
- InEnvs: map[string]string{
- "AWS_SDK_LOAD_CONFIG": "0",
- "AWS_SHARED_CREDENTIALS_FILE": testConfigFilename,
- "AWS_CONFIG_FILE": testConfigOtherFilename,
- "AWS_PROFILE": "shared_profile",
- },
- InProfile: "config_file_load_order",
- OutRegion: "shared_config_region",
- OutCreds: credentials.Value{
- AccessKeyID: "shared_config_akid",
- SecretAccessKey: "shared_config_secret",
- ProviderName: "SharedConfigCredentials",
- },
- },
- }
-
- for _, c := range cases {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- for k, v := range c.InEnvs {
- os.Setenv(k, v)
- }
-
- s, err := NewSessionWithOptions(Options{
- Profile: c.InProfile,
- SharedConfigState: SharedConfigEnable,
- })
- assert.NoError(t, err)
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, c.OutRegion, *s.Config.Region)
- assert.Equal(t, c.OutCreds.AccessKeyID, creds.AccessKeyID)
- assert.Equal(t, c.OutCreds.SecretAccessKey, creds.SecretAccessKey)
- assert.Equal(t, c.OutCreds.SessionToken, creds.SessionToken)
- assert.Contains(t, creds.ProviderName, c.OutCreds.ProviderName)
- }
-}
-
-func TestSesisonAssumeRole(t *testing.T) {
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_REGION", "us-east-1")
- os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "assume_role_w_creds")
-
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- const respMsg = `
-<AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
-  <AssumeRoleResult>
-    <AssumedRoleUser>
-      <Arn>arn:aws:sts::account_id:assumed-role/role/session_name</Arn>
-      <AssumedRoleId>AKID:session_name</AssumedRoleId>
-    </AssumedRoleUser>
-    <Credentials>
-      <AccessKeyId>AKID</AccessKeyId>
-      <SecretAccessKey>SECRET</SecretAccessKey>
-      <SessionToken>SESSION_TOKEN</SessionToken>
-      <Expiration>%s</Expiration>
-    </Credentials>
-  </AssumeRoleResult>
-  <ResponseMetadata>
-    <RequestId>request-id</RequestId>
-  </ResponseMetadata>
-</AssumeRoleResponse>
-`
- w.Write([]byte(fmt.Sprintf(respMsg, time.Now().Add(15*time.Minute).Format("2006-01-02T15:04:05Z"))))
- }))
-
- s, err := NewSession(&aws.Config{Endpoint: aws.String(server.URL), DisableSSL: aws.Bool(true)})
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, "AKID", creds.AccessKeyID)
- assert.Equal(t, "SECRET", creds.SecretAccessKey)
- assert.Equal(t, "SESSION_TOKEN", creds.SessionToken)
- assert.Contains(t, creds.ProviderName, "AssumeRoleProvider")
-}
-
-func TestSessionAssumeRole_DisableSharedConfig(t *testing.T) {
- // Backwards compatibility with Shared config disabled
- // assume role should not be built into the config.
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "0")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "assume_role_w_creds")
-
- s, err := NewSession()
- assert.NoError(t, err)
-
- creds, err := s.Config.Credentials.Get()
- assert.NoError(t, err)
- assert.Equal(t, "assume_role_w_creds_akid", creds.AccessKeyID)
- assert.Equal(t, "assume_role_w_creds_secret", creds.SecretAccessKey)
- assert.Contains(t, creds.ProviderName, "SharedConfigCredentials")
-}
-
-func TestSessionAssumeRole_InvalidSourceProfile(t *testing.T) {
- // Backwards compatibility with Shared config disabled
- // assume role should not be built into the config.
- oldEnv := initSessionTestEnv()
- defer popEnv(oldEnv)
-
- os.Setenv("AWS_SDK_LOAD_CONFIG", "1")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", testConfigFilename)
- os.Setenv("AWS_PROFILE", "assume_role_invalid_source_profile")
-
- s, err := NewSession()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "SharedConfigAssumeRoleError: failed to load assume role")
- assert.Nil(t, s)
-}
-
-func initSessionTestEnv() (oldEnv []string) {
- oldEnv = stashEnv()
- os.Setenv("AWS_CONFIG_FILE", "file_not_exists")
- os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "file_not_exists")
-
- return oldEnv
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
deleted file mode 100644
index b58076f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package session
-
-import (
- "fmt"
- "io/ioutil"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/go-ini/ini"
-)
-
-const (
- // Static Credentials group
- accessKeyIDKey = `aws_access_key_id` // group required
- secretAccessKey = `aws_secret_access_key` // group required
- sessionTokenKey = `aws_session_token` // optional
-
- // Assume Role Credentials group
- roleArnKey = `role_arn` // group required
- sourceProfileKey = `source_profile` // group required
- externalIDKey = `external_id` // optional
- mfaSerialKey = `mfa_serial` // optional
- roleSessionNameKey = `role_session_name` // optional
-
- // Additional Config fields
- regionKey = `region`
-
- // DefaultSharedConfigProfile is the default profile to be used when
- // loading configuration from the config files if another profile name
- // is not provided.
- DefaultSharedConfigProfile = `default`
-)
-
-type assumeRoleConfig struct {
- RoleARN string
- SourceProfile string
- ExternalID string
- MFASerial string
- RoleSessionName string
-}
-
-// sharedConfig represents the configuration fields of the SDK config files.
-type sharedConfig struct {
- // Credentials values from the config file. Both aws_access_key_id
- // and aws_secret_access_key must be provided together in the same file
- // to be considered valid. The values will be ignored if not a complete group.
- // aws_session_token is an optional field that can be provided if both of the
- // other two fields are also provided.
- //
- // aws_access_key_id
- // aws_secret_access_key
- // aws_session_token
- Creds credentials.Value
-
- AssumeRole assumeRoleConfig
- AssumeRoleSource *sharedConfig
-
- // Region is the region the SDK should use for looking up AWS service endpoints
- // and signing requests.
- //
- // region
- Region string
-}
-
-type sharedConfigFile struct {
- Filename string
- IniData *ini.File
-}
-
-// loadSharedConfig retrieves the configuration from the list of files
-// using the profile provided. The order the files are listed will determine
-// precedence. Values in subsequent files will overwrite values defined in
-// earlier files.
-//
-// For example, given two files A and B. Both define credentials. If the order
-// of the files are A then B, B's credential values will be used instead of A's.
-//
-// See sharedConfig.setFromIniFile for information on how the config files
-// will be loaded.
-func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
- if len(profile) == 0 {
- profile = DefaultSharedConfigProfile
- }
-
- files, err := loadSharedConfigIniFiles(filenames)
- if err != nil {
- return sharedConfig{}, err
- }
-
- cfg := sharedConfig{}
- if err = cfg.setFromIniFiles(profile, files); err != nil {
- return sharedConfig{}, err
- }
-
- if len(cfg.AssumeRole.SourceProfile) > 0 {
- if err := cfg.setAssumeRoleSource(profile, files); err != nil {
- return sharedConfig{}, err
- }
- }
-
- return cfg, nil
-}
-
-func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
- files := make([]sharedConfigFile, 0, len(filenames))
-
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- // Skip files which can't be opened and read for whatever reason
- continue
- }
-
- f, err := ini.Load(b)
- if err != nil {
- return nil, SharedConfigLoadError{Filename: filename}
- }
-
- files = append(files, sharedConfigFile{
- Filename: filename, IniData: f,
- })
- }
-
- return files, nil
-}
-
-func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
- var assumeRoleSrc sharedConfig
-
- // Multiple-level assume role chains are not supported.
- if cfg.AssumeRole.SourceProfile == origProfile {
- assumeRoleSrc = *cfg
- assumeRoleSrc.AssumeRole = assumeRoleConfig{}
- } else {
- err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
- if err != nil {
- return err
- }
- }
-
- if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
- return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
- }
-
- cfg.AssumeRoleSource = &assumeRoleSrc
-
- return nil
-}
-
-func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
- // Apply the named profile from each file, skipping files where it is missing.
- for _, f := range files {
- if err := cfg.setFromIniFile(profile, f); err != nil {
- if _, ok := err.(SharedConfigProfileNotExistsError); ok {
- // Ignore profiles that are missing
- continue
- }
- return err
- }
- }
-
- return nil
-}
-
-// setFromIniFile loads the configuration from the file using
-// the profile provided. A sharedConfig pointer type value is used so that
-// multiple config file loadings can be chained.
-//
-// Only complete, logically grouped values are loaded; fields in cfg are not
-// set for incomplete groups such as credentials. For example, if a config
-// file only includes aws_access_key_id but no aws_secret_access_key, the
-// aws_access_key_id will be ignored.
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
- section, err := file.IniData.GetSection(profile)
- if err != nil {
- // Fall back to the alternate profile name: profile <name>
- section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
- if err != nil {
- return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
- }
- }
-
- // Shared Credentials
- akid := section.Key(accessKeyIDKey).String()
- secret := section.Key(secretAccessKey).String()
- if len(akid) > 0 && len(secret) > 0 {
- cfg.Creds = credentials.Value{
- AccessKeyID: akid,
- SecretAccessKey: secret,
- SessionToken: section.Key(sessionTokenKey).String(),
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
- }
- }
-
- // Assume Role
- roleArn := section.Key(roleArnKey).String()
- srcProfile := section.Key(sourceProfileKey).String()
- if len(roleArn) > 0 && len(srcProfile) > 0 {
- cfg.AssumeRole = assumeRoleConfig{
- RoleARN: roleArn,
- SourceProfile: srcProfile,
- ExternalID: section.Key(externalIDKey).String(),
- MFASerial: section.Key(mfaSerialKey).String(),
- RoleSessionName: section.Key(roleSessionNameKey).String(),
- }
- }
-
- // Region
- if v := section.Key(regionKey).String(); len(v) > 0 {
- cfg.Region = v
- }
-
- return nil
-}
-
-// SharedConfigLoadError is an error returned when the shared config file fails to load.
-type SharedConfigLoadError struct {
- Filename string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigLoadError) Code() string {
- return "SharedConfigLoadError"
-}
-
-// Message is the description of the error
-func (e SharedConfigLoadError) Message() string {
- return fmt.Sprintf("failed to load config file, %s", e.Filename)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigLoadError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigLoadError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigProfileNotExistsError is an error for the shared config when
-// the profile was not found in the config file.
-type SharedConfigProfileNotExistsError struct {
- Profile string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigProfileNotExistsError) Code() string {
- return "SharedConfigProfileNotExistsError"
-}
-
-// Message is the description of the error
-func (e SharedConfigProfileNotExistsError) Message() string {
- return fmt.Sprintf("failed to get profile, %s", e.Profile)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigProfileNotExistsError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigProfileNotExistsError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigAssumeRoleError is an error for the shared config when the
-// profile contains assume role information, but that information is invalid
-// or not complete.
-type SharedConfigAssumeRoleError struct {
- RoleARN string
-}
-
-// Code is the short id of the error.
-func (e SharedConfigAssumeRoleError) Code() string {
- return "SharedConfigAssumeRoleError"
-}
-
-// Message is the description of the error
-func (e SharedConfigAssumeRoleError) Message() string {
- return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
- e.RoleARN)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigAssumeRoleError) OrigErr() error {
- return nil
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigAssumeRoleError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
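
To tie the assume-role plumbing above together, here is a hedged sketch of loading an assume-role profile through the public session API. The config file path, profile name, and role ARN are hypothetical.

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Assume a config file like the following exists (INI syntax, parsed by
	// the shared_config.go loader above):
	//
	//   [profile deploy]
	//   role_arn       = arn:aws:iam::123456789012:role/deploy
	//   source_profile = default
	//   region         = us-east-1
	//
	// The source profile must carry complete static credentials, otherwise
	// loading fails with SharedConfigAssumeRoleError.
	os.Setenv("AWS_CONFIG_FILE", "/path/to/config") // hypothetical path

	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "deploy",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatalf("failed to load assume-role profile: %v", err)
	}
	_ = sess
}
```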
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config_test.go
deleted file mode 100644
index 1a164d4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config_test.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package session
-
-import (
- "fmt"
- "path/filepath"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/go-ini/ini"
- "github.com/stretchr/testify/assert"
-)
-
-var (
- testConfigFilename = filepath.Join("testdata", "shared_config")
- testConfigOtherFilename = filepath.Join("testdata", "shared_config_other")
-)
-
-func TestLoadSharedConfig(t *testing.T) {
- cases := []struct {
- Filenames []string
- Profile string
- Expected sharedConfig
- Err error
- }{
- {
- Filenames: []string{"file_not_exists"},
- Profile: "default",
- },
- {
- Filenames: []string{testConfigFilename},
- Expected: sharedConfig{
- Region: "default_region",
- },
- },
- {
- Filenames: []string{testConfigOtherFilename, testConfigFilename},
- Profile: "config_file_load_order",
- Expected: sharedConfig{
- Region: "shared_config_region",
- Creds: credentials.Value{
- AccessKeyID: "shared_config_akid",
- SecretAccessKey: "shared_config_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- },
- },
- {
- Filenames: []string{testConfigFilename, testConfigOtherFilename},
- Profile: "config_file_load_order",
- Expected: sharedConfig{
- Region: "shared_config_other_region",
- Creds: credentials.Value{
- AccessKeyID: "shared_config_other_akid",
- SecretAccessKey: "shared_config_other_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigOtherFilename),
- },
- },
- },
- {
- Filenames: []string{testConfigOtherFilename, testConfigFilename},
- Profile: "assume_role",
- Expected: sharedConfig{
- AssumeRole: assumeRoleConfig{
- RoleARN: "assume_role_role_arn",
- SourceProfile: "complete_creds",
- },
- AssumeRoleSource: &sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "complete_creds_akid",
- SecretAccessKey: "complete_creds_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- },
- },
- },
- {
- Filenames: []string{testConfigOtherFilename, testConfigFilename},
- Profile: "assume_role_invalid_source_profile",
- Expected: sharedConfig{
- AssumeRole: assumeRoleConfig{
- RoleARN: "assume_role_invalid_source_profile_role_arn",
- SourceProfile: "profile_not_exists",
- },
- },
- Err: SharedConfigAssumeRoleError{RoleARN: "assume_role_invalid_source_profile_role_arn"},
- },
- {
- Filenames: []string{testConfigOtherFilename, testConfigFilename},
- Profile: "assume_role_w_creds",
- Expected: sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "assume_role_w_creds_akid",
- SecretAccessKey: "assume_role_w_creds_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- AssumeRole: assumeRoleConfig{
- RoleARN: "assume_role_w_creds_role_arn",
- SourceProfile: "assume_role_w_creds",
- ExternalID: "1234",
- RoleSessionName: "assume_role_w_creds_session_name",
- },
- AssumeRoleSource: &sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "assume_role_w_creds_akid",
- SecretAccessKey: "assume_role_w_creds_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- },
- },
- },
- {
- Filenames: []string{testConfigOtherFilename, testConfigFilename},
- Profile: "assume_role_wo_creds",
- Expected: sharedConfig{
- AssumeRole: assumeRoleConfig{
- RoleARN: "assume_role_wo_creds_role_arn",
- SourceProfile: "assume_role_wo_creds",
- },
- },
- Err: SharedConfigAssumeRoleError{RoleARN: "assume_role_wo_creds_role_arn"},
- },
- {
- Filenames: []string{filepath.Join("testdata", "shared_config_invalid_ini")},
- Profile: "profile_name",
- Err: SharedConfigLoadError{Filename: filepath.Join("testdata", "shared_config_invalid_ini")},
- },
- }
-
- for i, c := range cases {
- cfg, err := loadSharedConfig(c.Profile, c.Filenames)
- if c.Err != nil {
- assert.Contains(t, err.Error(), c.Err.Error(), "expected error, %d", i)
- continue
- }
-
- assert.NoError(t, err, "unexpected error, %d", i)
- assert.Equal(t, c.Expected, cfg, "not equal, %d", i)
- }
-}
-
-func TestLoadSharedConfigFromFile(t *testing.T) {
- filename := testConfigFilename
- f, err := ini.Load(filename)
- if err != nil {
- t.Fatalf("failed to load test config file, %s, %v", filename, err)
- }
- iniFile := sharedConfigFile{IniData: f, Filename: filename}
-
- cases := []struct {
- Profile string
- Expected sharedConfig
- Err error
- }{
- {
- Profile: "default",
- Expected: sharedConfig{Region: "default_region"},
- },
- {
- Profile: "alt_profile_name",
- Expected: sharedConfig{Region: "alt_profile_name_region"},
- },
- {
- Profile: "short_profile_name_first",
- Expected: sharedConfig{Region: "short_profile_name_first_short"},
- },
- {
- Profile: "partial_creds",
- Expected: sharedConfig{},
- },
- {
- Profile: "complete_creds",
- Expected: sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "complete_creds_akid",
- SecretAccessKey: "complete_creds_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- },
- },
- {
- Profile: "complete_creds_with_token",
- Expected: sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "complete_creds_with_token_akid",
- SecretAccessKey: "complete_creds_with_token_secret",
- SessionToken: "complete_creds_with_token_token",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- },
- },
- {
- Profile: "full_profile",
- Expected: sharedConfig{
- Creds: credentials.Value{
- AccessKeyID: "full_profile_akid",
- SecretAccessKey: "full_profile_secret",
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", testConfigFilename),
- },
- Region: "full_profile_region",
- },
- },
- {
- Profile: "partial_assume_role",
- Expected: sharedConfig{},
- },
- {
- Profile: "assume_role",
- Expected: sharedConfig{
- AssumeRole: assumeRoleConfig{
- RoleARN: "assume_role_role_arn",
- SourceProfile: "complete_creds",
- },
- },
- },
- {
- Profile: "does_not_exists",
- Err: SharedConfigProfileNotExistsError{Profile: "does_not_exists"},
- },
- }
-
- for i, c := range cases {
- cfg := sharedConfig{}
-
- err := cfg.setFromIniFile(c.Profile, iniFile)
- if c.Err != nil {
- assert.Contains(t, err.Error(), c.Err.Error(), "expected error, %d", i)
- continue
- }
-
- assert.NoError(t, err, "unexpected error, %d", i)
- assert.Equal(t, c.Expected, cfg, "not equal, %d", i)
- }
-}
-
-func TestLoadSharedConfigIniFiles(t *testing.T) {
- cases := []struct {
- Filenames []string
- Expected []sharedConfigFile
- }{
- {
- Filenames: []string{"not_exists", testConfigFilename},
- Expected: []sharedConfigFile{
- {Filename: testConfigFilename},
- },
- },
- {
- Filenames: []string{testConfigFilename, testConfigOtherFilename},
- Expected: []sharedConfigFile{
- {Filename: testConfigFilename},
- {Filename: testConfigOtherFilename},
- },
- },
- }
-
- for i, c := range cases {
- files, err := loadSharedConfigIniFiles(c.Filenames)
- assert.NoError(t, err, "unexpected error, %d", i)
- assert.Equal(t, len(c.Expected), len(files), "expected num files, %d", i)
-
- for i, expectedFile := range c.Expected {
- assert.Equal(t, expectedFile.Filename, files[i].Filename)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config b/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config
deleted file mode 100644
index e41fe21..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config
+++ /dev/null
@@ -1,60 +0,0 @@
-[default]
-s3 =
- unsupported_key=123
- other_unsupported=abc
-
-region = default_region
-
-[profile alt_profile_name]
-region = alt_profile_name_region
-
-[short_profile_name_first]
-region = short_profile_name_first_short
-
-[profile short_profile_name_first]
-region = short_profile_name_first_alt
-
-[partial_creds]
-aws_access_key_id = partial_creds_akid
-
-[complete_creds]
-aws_access_key_id = complete_creds_akid
-aws_secret_access_key = complete_creds_secret
-
-[complete_creds_with_token]
-aws_access_key_id = complete_creds_with_token_akid
-aws_secret_access_key = complete_creds_with_token_secret
-aws_session_token = complete_creds_with_token_token
-
-[full_profile]
-aws_access_key_id = full_profile_akid
-aws_secret_access_key = full_profile_secret
-region = full_profile_region
-
-[config_file_load_order]
-region = shared_config_region
-aws_access_key_id = shared_config_akid
-aws_secret_access_key = shared_config_secret
-
-[partial_assume_role]
-role_arn = partial_assume_role_role_arn
-
-[assume_role]
-role_arn = assume_role_role_arn
-source_profile = complete_creds
-
-[assume_role_invalid_source_profile]
-role_arn = assume_role_invalid_source_profile_role_arn
-source_profile = profile_not_exists
-
-[assume_role_w_creds]
-role_arn = assume_role_w_creds_role_arn
-source_profile = assume_role_w_creds
-external_id = 1234
-role_session_name = assume_role_w_creds_session_name
-aws_access_key_id = assume_role_w_creds_akid
-aws_secret_access_key = assume_role_w_creds_secret
-
-[assume_role_wo_creds]
-role_arn = assume_role_wo_creds_role_arn
-source_profile = assume_role_wo_creds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_invalid_ini b/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_invalid_ini
deleted file mode 100644
index 4db0389..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_invalid_ini
+++ /dev/null
@@ -1 +0,0 @@
-[profile_nam
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_other b/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_other
deleted file mode 100644
index 615831b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/testdata/shared_config_other
+++ /dev/null
@@ -1,17 +0,0 @@
-[default]
-region = default_region
-
-[partial_creds]
-aws_access_key_id = AKID
-
-[profile alt_profile_name]
-region = alt_profile_name_region
-
-[creds_from_credentials]
-aws_access_key_id = creds_from_config_akid
-aws_secret_access_key = creds_from_config_secret
-
-[config_file_load_order]
-region = shared_config_other_region
-aws_access_key_id = shared_config_other_akid
-aws_secret_access_key = shared_config_other_secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_4_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_4_test.go
deleted file mode 100644
index e559838..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_4_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build !go1.5
-
-package v4_test
-
-import (
- "fmt"
- "net/http"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/stretchr/testify/assert"
-)
-
-func TestStandaloneSign(t *testing.T) {
- creds := unit.Session.Config.Credentials
- signer := v4.NewSigner(creds)
-
- for _, c := range standaloneSignCases {
- host := fmt.Sprintf("%s.%s.%s.amazonaws.com",
- c.SubDomain, c.Region, c.Service)
-
- req, err := http.NewRequest("GET", fmt.Sprintf("https://%s", host), nil)
- assert.NoError(t, err)
-
- req.URL.Path = c.OrigURI
- req.URL.RawQuery = c.OrigQuery
- req.URL.Opaque = fmt.Sprintf("//%s%s", host, c.EscapedURI)
- opaqueURI := req.URL.Opaque
-
- _, err = signer.Sign(req, nil, c.Service, c.Region, time.Unix(0, 0))
- assert.NoError(t, err)
-
- actual := req.Header.Get("Authorization")
- assert.Equal(t, c.ExpSig, actual)
- assert.Equal(t, c.OrigURI, req.URL.Path)
- assert.Equal(t, opaqueURI, req.URL.Opaque)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go
deleted file mode 100644
index 6f0d549..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build go1.5
-
-package v4_test
-
-import (
- "fmt"
- "net/http"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/stretchr/testify/assert"
-)
-
-func TestStandaloneSign(t *testing.T) {
- creds := unit.Session.Config.Credentials
- signer := v4.NewSigner(creds)
-
- for _, c := range standaloneSignCases {
- host := fmt.Sprintf("https://%s.%s.%s.amazonaws.com",
- c.SubDomain, c.Region, c.Service)
-
- req, err := http.NewRequest("GET", host, nil)
- assert.NoError(t, err)
-
- // URL.EscapedPath() will be used by the signer to get the
- // escaped form of the request's URI path.
- req.URL.Path = c.OrigURI
- req.URL.RawQuery = c.OrigQuery
-
- _, err = signer.Sign(req, nil, c.Service, c.Region, time.Unix(0, 0))
- assert.NoError(t, err)
-
- actual := req.Header.Get("Authorization")
- assert.Equal(t, c.ExpSig, actual)
- assert.Equal(t, c.OrigURI, req.URL.Path)
- assert.Equal(t, c.EscapedURI, req.URL.EscapedPath())
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go
deleted file mode 100644
index f86293a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package v4_test
-
-import (
- "net/http"
- "net/url"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/stretchr/testify/assert"
-)
-
-var standaloneSignCases = []struct {
- OrigURI string
- OrigQuery string
- Region, Service, SubDomain string
- ExpSig string
- EscapedURI string
-}{
- {
- OrigURI: `/logs-*/_search`,
- OrigQuery: `pretty=true`,
- Region: "us-west-2", Service: "es", SubDomain: "hostname-clusterkey",
- EscapedURI: `/logs-%2A/_search`,
- ExpSig: `AWS4-HMAC-SHA256 Credential=AKID/19700101/us-west-2/es/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=79d0760751907af16f64a537c1242416dacf51204a7dd5284492d15577973b91`,
- },
-}
-
-func TestPresignHandler(t *testing.T) {
- svc := s3.New(unit.Session)
- req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
- Bucket: aws.String("bucket"),
- Key: aws.String("key"),
- ContentDisposition: aws.String("a+b c$d"),
- ACL: aws.String("public-read"),
- })
- req.Time = time.Unix(0, 0)
- urlstr, err := req.Presign(5 * time.Minute)
-
- assert.NoError(t, err)
-
- expectedDate := "19700101T000000Z"
- expectedHeaders := "content-disposition;host;x-amz-acl"
- expectedSig := "b2754ba8ffeb74a40b94767017e24c4672107d6d5a894648d5d332ca61f5ffe4"
- expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
-
- u, _ := url.Parse(urlstr)
- urlQ := u.Query()
- assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
- assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
- assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
- assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
- assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
-
- assert.NotContains(t, urlstr, "+") // + encoded as %20
-}
-
-func TestPresignRequest(t *testing.T) {
- svc := s3.New(unit.Session)
- req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
- Bucket: aws.String("bucket"),
- Key: aws.String("key"),
- ContentDisposition: aws.String("a+b c$d"),
- ACL: aws.String("public-read"),
- })
- req.Time = time.Unix(0, 0)
- urlstr, headers, err := req.PresignRequest(5 * time.Minute)
-
- assert.NoError(t, err)
-
- expectedDate := "19700101T000000Z"
- expectedHeaders := "content-disposition;host;x-amz-acl;x-amz-content-sha256"
- expectedSig := "0d200ba61501d752acd06f39ef4dbe7d83ffd5ea15978dc3476dfc00b8eb574e"
- expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
- expectedHeaderMap := http.Header{
- "x-amz-acl": []string{"public-read"},
- "content-disposition": []string{"a+b c$d"},
- "x-amz-content-sha256": []string{"UNSIGNED-PAYLOAD"},
- }
-
- u, _ := url.Parse(urlstr)
- urlQ := u.Query()
- assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
- assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
- assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
- assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
- assert.Equal(t, expectedHeaderMap, headers)
- assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
-
- assert.NotContains(t, urlstr, "+") // + encoded as %20
-}
-
-func TestStandaloneSign_CustomURIEscape(t *testing.T) {
- var expectSig = `AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/es/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=6601e883cc6d23871fd6c2a394c5677ea2b8c82b04a6446786d64cd74f520967`
-
- creds := unit.Session.Config.Credentials
- signer := v4.NewSigner(creds, func(s *v4.Signer) {
- s.DisableURIPathEscaping = true
- })
-
- host := "https://subdomain.us-east-1.es.amazonaws.com"
- req, err := http.NewRequest("GET", host, nil)
- assert.NoError(t, err)
-
- req.URL.Path = `/log-*/_search`
- req.URL.Opaque = "//subdomain.us-east-1.es.amazonaws.com/log-%2A/_search"
-
- _, err = signer.Sign(req, nil, "es", "us-east-1", time.Unix(0, 0))
- assert.NoError(t, err)
-
- actual := req.Header.Get("Authorization")
- assert.Equal(t, expectSig, actual)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
deleted file mode 100644
index 244c86d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package v4
-
-import (
- "net/http"
- "strings"
-)
-
-// rules houses the set of rules needed for validation of a
-// string value
-type rules []rule
-
-// rule interface allows for more flexible rules and simply
-// checks whether or not a value adheres to that rule
-type rule interface {
- IsValid(value string) bool
-}
-
-// IsValid will iterate through all rules and report whether any rule
-// applies to the value; nested rules are supported
-func (r rules) IsValid(value string) bool {
- for _, rule := range r {
- if rule.IsValid(value) {
- return true
- }
- }
- return false
-}
-
-// mapRule generic rule for maps
-type mapRule map[string]struct{}
-
-// IsValid for the map rule satisfies whether it exists in the map
-func (m mapRule) IsValid(value string) bool {
- _, ok := m[value]
- return ok
-}
-
-// whitelist is a generic rule for whitelisting
-type whitelist struct {
- rule
-}
-
-// IsValid for whitelist checks if the value is within the whitelist
-func (w whitelist) IsValid(value string) bool {
- return w.rule.IsValid(value)
-}
-
-// blacklist is a generic rule for blacklisting
-type blacklist struct {
- rule
-}
-
-// IsValid for blacklist checks if the value is not within the blacklist
-func (b blacklist) IsValid(value string) bool {
- return !b.rule.IsValid(value)
-}
-
-type patterns []string
-
-// IsValid for patterns checks each pattern and returns if a match has
-// been found
-func (p patterns) IsValid(value string) bool {
- for _, pattern := range p {
- if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
- return true
- }
- }
- return false
-}
-
-// inclusiveRules allows rules to depend on one another
-type inclusiveRules []rule
-
-// IsValid will return true if all rules are true
-func (r inclusiveRules) IsValid(value string) bool {
- for _, rule := range r {
- if !rule.IsValid(value) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go
deleted file mode 100644
index 7dfddc8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package v4
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestRuleCheckWhitelist(t *testing.T) {
- w := whitelist{
- mapRule{
- "Cache-Control": struct{}{},
- },
- }
-
- assert.True(t, w.IsValid("Cache-Control"))
- assert.False(t, w.IsValid("Cache-"))
-}
-
-func TestRuleCheckBlacklist(t *testing.T) {
- b := blacklist{
- mapRule{
- "Cache-Control": struct{}{},
- },
- }
-
- assert.False(t, b.IsValid("Cache-Control"))
- assert.True(t, b.IsValid("Cache-"))
-}
-
-func TestRuleCheckPattern(t *testing.T) {
- p := patterns{"X-Amz-Meta-"}
-
- assert.True(t, p.IsValid("X-Amz-Meta-"))
- assert.True(t, p.IsValid("X-Amz-Meta-Star"))
- assert.False(t, p.IsValid("Cache-"))
-}
-
-func TestRuleComplexWhitelist(t *testing.T) {
- w := rules{
- whitelist{
- mapRule{
- "Cache-Control": struct{}{},
- },
- },
- patterns{"X-Amz-Meta-"},
- }
-
- r := rules{
- inclusiveRules{patterns{"X-Amz-"}, blacklist{w}},
- }
-
- assert.True(t, r.IsValid("X-Amz-Blah"))
- assert.False(t, r.IsValid("X-Amz-Meta-"))
- assert.False(t, r.IsValid("X-Amz-Meta-Star"))
- assert.False(t, r.IsValid("Cache-Control"))
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
deleted file mode 100644
index bd082e9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build go1.5
-
-package v4
-
-import (
- "net/url"
- "strings"
-)
-
-func getURIPath(u *url.URL) string {
- var uri string
-
- if len(u.Opaque) > 0 {
- uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
- } else {
- uri = u.EscapedPath()
- }
-
- if len(uri) == 0 {
- uri = "/"
- }
-
- return uri
-}
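
The helper above prefers URL.Opaque over the escaped path. Below is a small self-contained sketch of that behavior (re-implementing the split locally, since getURIPath is unexported) with an illustrative Elasticsearch-style path:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/logs-*/_search", nil)

	// net/url leaves '*' unescaped, so the canonical path stays
	// "/logs-*/_search" unless the caller pre-escapes it.
	fmt.Println(req.URL.EscapedPath())

	// Pre-escaped form supplied via URL.Opaque, as in the standalone
	// signing tests deleted earlier in this change.
	req.URL.Opaque = "//example.com/logs-%2A/_search"

	// Equivalent of getURIPath: drop the leading "//host" from Opaque.
	uri := "/" + strings.Join(strings.Split(req.URL.Opaque, "/")[3:], "/")
	fmt.Println(uri) // "/logs-%2A/_search"
}
```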
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
deleted file mode 100644
index 7966041..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !go1.5
-
-package v4
-
-import (
- "net/url"
- "strings"
-)
-
-func getURIPath(u *url.URL) string {
- var uri string
-
- if len(u.Opaque) > 0 {
- uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
- } else {
- uri = u.Path
- }
-
- if len(uri) == 0 {
- uri = "/"
- }
-
- return uri
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
deleted file mode 100644
index 986530b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ /dev/null
@@ -1,713 +0,0 @@
-// Package v4 implements signing for AWS V4 signer
-//
-// Provides request signing for requests that need to be signed with
-// AWS V4 Signatures.
-//
-// Standalone Signer
-//
-// Generally using the signer outside of the SDK should not require any additional
-// logic when using Go v1.5 or higher. The signer does this by taking advantage
-// of the URL.EscapedPath method. If your request URI requires additional escaping
-// you may need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
-//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
-//
-// "///"
-//
-// // e.g.
-// "//example.com/some/path"
-//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
-//
-// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
-// method and use the returned value. If you're using Go v1.4 you must set
-// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
-// Go v1.5 the signer will fallback to URL.Path.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of this escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query the signature was generated for.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
-//
-// Because of this, when using the signer outside of the SDK it is recommended
-// to explicitly escape the request prior to signing, which helps prevent
-// signature validation errors. This can be done by setting
-// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
-//
-// Test `TestStandaloneSign` provides a complete example of using the signer
-// outside of the SDK and pre-escaping the URI path.
-package v4
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-const (
- authHeaderPrefix = "AWS4-HMAC-SHA256"
- timeFormat = "20060102T150405Z"
- shortTimeFormat = "20060102"
-
- // emptyStringSHA256 is a SHA256 of an empty string
- emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
-)
-
-var ignoredHeaders = rules{
- blacklist{
- mapRule{
- "Authorization": struct{}{},
- "User-Agent": struct{}{},
- },
- },
-}
-
-// requiredSignedHeaders is a whitelist for building canonical headers.
-var requiredSignedHeaders = rules{
- whitelist{
- mapRule{
- "Cache-Control": struct{}{},
- "Content-Disposition": struct{}{},
- "Content-Encoding": struct{}{},
- "Content-Language": struct{}{},
- "Content-Md5": struct{}{},
- "Content-Type": struct{}{},
- "Expires": struct{}{},
- "If-Match": struct{}{},
- "If-Modified-Since": struct{}{},
- "If-None-Match": struct{}{},
- "If-Unmodified-Since": struct{}{},
- "Range": struct{}{},
- "X-Amz-Acl": struct{}{},
- "X-Amz-Copy-Source": struct{}{},
- "X-Amz-Copy-Source-If-Match": struct{}{},
- "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
- "X-Amz-Copy-Source-If-None-Match": struct{}{},
- "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
- "X-Amz-Copy-Source-Range": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
- "X-Amz-Grant-Full-control": struct{}{},
- "X-Amz-Grant-Read": struct{}{},
- "X-Amz-Grant-Read-Acp": struct{}{},
- "X-Amz-Grant-Write": struct{}{},
- "X-Amz-Grant-Write-Acp": struct{}{},
- "X-Amz-Metadata-Directive": struct{}{},
- "X-Amz-Mfa": struct{}{},
- "X-Amz-Request-Payer": struct{}{},
- "X-Amz-Server-Side-Encryption": struct{}{},
- "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
- "X-Amz-Storage-Class": struct{}{},
- "X-Amz-Website-Redirect-Location": struct{}{},
- },
- },
- patterns{"X-Amz-Meta-"},
-}
-
-// allowedQueryHoisting is a whitelist of headers that may be hoisted to the
-// query string: X-Amz-* headers that are not in requiredSignedHeaders.
-var allowedQueryHoisting = inclusiveRules{
- blacklist{requiredSignedHeaders},
- patterns{"X-Amz-"},
-}
-
-// Signer applies AWS v4 signing to given request. Use this to sign requests
-// that need to be signed with AWS V4 Signatures.
-type Signer struct {
- // The authentication credentials the request will be signed against.
- // This value must be set to sign requests.
- Credentials *credentials.Credentials
-
- // Sets the log level the signer should use when reporting information to
- // the logger. If the logger is nil nothing will be logged. See
- // aws.LogLevelType for more information on available logging levels
- //
- // By default nothing will be logged.
- Debug aws.LogLevelType
-
-// The logger that logging information will be written to. If the logger
- // is nil, nothing will be logged.
- Logger aws.Logger
-
- // Disables the Signer's moving HTTP header key/value pairs from the HTTP
- // request header to the request's query string. This is most commonly used
- // with pre-signed requests preventing headers from being added to the
- // request's query string.
- DisableHeaderHoisting bool
-
- // Disables the automatic escaping of the URI path of the request for the
-// signature's canonical string's path. For services that do not need additional
-// escaping, use this to disable the signer escaping the path.
- //
- // S3 is an example of a service that does not need additional escaping.
- //
- // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- DisableURIPathEscaping bool
-
- // currentTimeFn returns the time value which represents the current time.
- // This value should only be used for testing. If it is nil the default
- // time.Now will be used.
- currentTimeFn func() time.Time
-}
-
-// NewSigner returns a Signer pointer configured with the credentials and optional
-// option values provided. If no options are provided the Signer will use its
-// default configuration.
-func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
- v4 := &Signer{
- Credentials: credentials,
- }
-
- for _, option := range options {
- option(v4)
- }
-
- return v4
-}
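
For reference while reading the rest of this file, a minimal sketch of standalone signing with NewSigner and Sign, as described in the package comment. The static credentials, service name, and region are placeholders.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Static credentials purely for illustration; real code would use a
	// credential provider chain.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	req, err := http.NewRequest("GET", "https://sqs.us-east-1.amazonaws.com/", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Sign adds the Authorization and X-Amz-Date headers in place.
	if _, err := signer.Sign(req, nil, "sqs", "us-east-1", time.Now()); err != nil {
		log.Fatal(err)
	}
	log.Println(req.Header.Get("Authorization"))
}
```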
-
-type signingCtx struct {
- ServiceName string
- Region string
- Request *http.Request
- Body io.ReadSeeker
- Query url.Values
- Time time.Time
- ExpireTime time.Duration
- SignedHeaderVals http.Header
-
- DisableURIPathEscaping bool
-
- credValues credentials.Value
- isPresign bool
- formattedTime string
- formattedShortTime string
-
- bodyDigest string
- signedHeaders string
- canonicalHeaders string
- canonicalString string
- credentialString string
- stringToSign string
- signature string
- authorization string
-}
-
-// Sign signs AWS v4 requests with the provided body, service name, region the
-// request is made to, and time the request is signed at. The signTime allows
-// you to specify that a request is signed for the future, and cannot be
-// used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. Generally for signed requests this value
-// is not needed as the full request context will be captured by the http.Request
-// value. It is included for reference though.
-//
-// Sign will set the request's Body to be the `body` parameter passed in. If
-// the body is not already an io.ReadCloser, it will be wrapped within one. If
-// a `nil` body parameter is passed to Sign, the request's Body field will
-// also be set to nil. It's important to note that this functionality will not
-// change the request's ContentLength.
-//
-// Sign differs from Presign in that it will sign the request using HTTP
-// header values. This type of signing is intended for http.Request values that
-// will not be shared, or are shared in a way the header values on the request
-// will not be lost.
-//
-// The request's body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, 0, signTime)
-}
-
-// Presign signs AWS v4 requests with the provided body, service name, region
-// the request is made to, and time the request is signed at. The signTime
-// allows you to specify that a request is signed for the future, and cannot
-// be used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. For presigned requests these headers
-// and their values must be included on the HTTP request when it is made. This
-// is helpful to know what header values need to be shared with the party the
-// presigned request will be distributed to.
-//
-// Presign differs from Sign in that it will sign the request using query string
-// instead of header values. This allows you to share the Presigned Request's
-// URL with third parties, or distribute it throughout your system with minimal
-// dependencies.
-//
-// Presign also takes an exp value which is the duration the
-// signed request will be valid after the signing time. This allows you to
-// set when the request will expire.
-//
-// The request's body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-//
-// Presigning an S3 request will not compute the body's SHA256 hash by default.
-// This is because the general use case for S3 presigned URLs is to share
-// PUT/GET capabilities. If you would like to include the body's SHA256 in the
-// presigned request's signature you can set the "X-Amz-Content-Sha256"
-// HTTP header and that will be included in the request's signature.
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, exp, signTime)
-}
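
And the query-string counterpart: a sketch of Presign with an expiry, using a hypothetical bucket and key.

```go
package main

import (
	"bytes"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	body := bytes.NewReader([]byte("payload"))
	req, err := http.NewRequest("PUT", "https://examplebucket.s3.amazonaws.com/key", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Presign moves the signature into the query string and limits its
	// validity to the given duration. The returned headers must be sent
	// verbatim by whoever eventually uses the URL.
	signedHeaders, err := signer.Presign(req, body, "s3", "us-east-1", 15*time.Minute, time.Now())
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.URL.String(), signedHeaders)
}
```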
-
-func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
- currentTimeFn := v4.currentTimeFn
- if currentTimeFn == nil {
- currentTimeFn = time.Now
- }
-
- ctx := &signingCtx{
- Request: r,
- Body: body,
- Query: r.URL.Query(),
- Time: signTime,
- ExpireTime: exp,
- isPresign: exp != 0,
- ServiceName: service,
- Region: region,
- DisableURIPathEscaping: v4.DisableURIPathEscaping,
- }
-
- if ctx.isRequestSigned() {
- ctx.Time = currentTimeFn()
- ctx.handlePresignRemoval()
- }
-
- var err error
- ctx.credValues, err = v4.Credentials.Get()
- if err != nil {
- return http.Header{}, err
- }
-
- ctx.assignAmzQueryValues()
- ctx.build(v4.DisableHeaderHoisting)
-
- // If the request is not presigned the body should be attached to it. This
-// prevents the confusion of sending a signed request without the body
-// it was signed for attached.
- if !ctx.isPresign {
- var reader io.ReadCloser
- if body != nil {
- var ok bool
- if reader, ok = body.(io.ReadCloser); !ok {
- reader = ioutil.NopCloser(body)
- }
- }
- r.Body = reader
- }
-
- if v4.Debug.Matches(aws.LogDebugWithSigning) {
- v4.logSigningInfo(ctx)
- }
-
- return ctx.SignedHeaderVals, nil
-}
-
-func (ctx *signingCtx) handlePresignRemoval() {
- if !ctx.isPresign {
- return
- }
-
-	// The request was previously signed, so the existing presign values are now
-	// stale and need to be removed before the request is re-signed.
- ctx.removePresign()
-
-	// Update the request's query string to ensure the values stay in
-	// sync in case retrieving the new credentials fails.
- ctx.Request.URL.RawQuery = ctx.Query.Encode()
-}
-
-func (ctx *signingCtx) assignAmzQueryValues() {
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
- if ctx.credValues.SessionToken != "" {
- ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
- } else {
- ctx.Query.Del("X-Amz-Security-Token")
- }
-
- return
- }
-
- if ctx.credValues.SessionToken != "" {
- ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
- }
-}
-
-// SignRequestHandler is a named request handler the SDK will use to sign
-// service client request with using the V4 signature.
-var SignRequestHandler = request.NamedHandler{
- Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
-}
-
-// SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler is best used only with the SDK's built-in service clients'
-// API operation requests.
-//
-// This function should not be used on its own, but in conjunction with
-// an AWS service client's API operation call. To sign a standalone request
-// not created by a service client's API operation method use the "Sign" or
-// "Presign" functions of the "Signer" type.
-//
-// If the credentials of the request's config are set to
-// credentials.AnonymousCredentials the request will not be signed.
-func SignSDKRequest(req *request.Request) {
- signSDKRequestWithCurrTime(req, time.Now)
-}
-func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
- // If the request does not need to be signed ignore the signing of the
- // request if the AnonymousCredentials object is used.
- if req.Config.Credentials == credentials.AnonymousCredentials {
- return
- }
-
- region := req.ClientInfo.SigningRegion
- if region == "" {
- region = aws.StringValue(req.Config.Region)
- }
-
- name := req.ClientInfo.SigningName
- if name == "" {
- name = req.ClientInfo.ServiceName
- }
-
- v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
- v4.Debug = req.Config.LogLevel.Value()
- v4.Logger = req.Config.Logger
- v4.DisableHeaderHoisting = req.NotHoist
- v4.currentTimeFn = curTimeFn
- if name == "s3" {
- // S3 service should not have any escaping applied
- v4.DisableURIPathEscaping = true
- }
- })
-
- signingTime := req.Time
- if !req.LastSignedAt.IsZero() {
- signingTime = req.LastSignedAt
- }
-
- signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
- name, region, req.ExpireTime, signingTime,
- )
- if err != nil {
- req.Error = err
- req.SignedHeaderVals = nil
- return
- }
-
- req.SignedHeaderVals = signedHeaders
- req.LastSignedAt = curTimeFn()
-}
-
-const logSignInfoMsg = `DEBUG: Request Signature:
----[ CANONICAL STRING ]-----------------------------
-%s
----[ STRING TO SIGN ]--------------------------------
-%s%s
------------------------------------------------------`
-const logSignedURLMsg = `
----[ SIGNED URL ]------------------------------------
-%s`
-
-func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
- signedURLMsg := ""
- if ctx.isPresign {
- signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
- }
- msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
- v4.Logger.Log(msg)
-}
-
-func (ctx *signingCtx) build(disableHeaderHoisting bool) {
- ctx.buildTime() // no depends
- ctx.buildCredentialString() // no depends
-
- unsignedHeaders := ctx.Request.Header
- if ctx.isPresign {
- if !disableHeaderHoisting {
- urlValues := url.Values{}
- urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
- for k := range urlValues {
- ctx.Query[k] = urlValues[k]
- }
- }
- }
-
- ctx.buildBodyDigest()
- ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
- ctx.buildCanonicalString() // depends on canon headers / signed headers
- ctx.buildStringToSign() // depends on canon string
- ctx.buildSignature() // depends on string to sign
-
- if ctx.isPresign {
- ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
- } else {
- parts := []string{
- authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
- "SignedHeaders=" + ctx.signedHeaders,
- "Signature=" + ctx.signature,
- }
- ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
- }
-}
-
-func (ctx *signingCtx) buildTime() {
- ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
- ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
-
- if ctx.isPresign {
- duration := int64(ctx.ExpireTime / time.Second)
- ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
- ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
- } else {
- ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
- }
-}
-
-func (ctx *signingCtx) buildCredentialString() {
- ctx.credentialString = strings.Join([]string{
- ctx.formattedShortTime,
- ctx.Region,
- ctx.ServiceName,
- "aws4_request",
- }, "/")
-
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
- }
-}
-
-func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
- query := url.Values{}
- unsignedHeaders := http.Header{}
- for k, h := range header {
- if r.IsValid(k) {
- query[k] = h
- } else {
- unsignedHeaders[k] = h
- }
- }
-
- return query, unsignedHeaders
-}
-func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
- var headers []string
- headers = append(headers, "host")
- for k, v := range header {
- canonicalKey := http.CanonicalHeaderKey(k)
- if !r.IsValid(canonicalKey) {
- continue // ignored header
- }
- if ctx.SignedHeaderVals == nil {
- ctx.SignedHeaderVals = make(http.Header)
- }
-
- lowerCaseKey := strings.ToLower(k)
- if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
- // include additional values
- ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
- continue
- }
-
- headers = append(headers, lowerCaseKey)
- ctx.SignedHeaderVals[lowerCaseKey] = v
- }
- sort.Strings(headers)
-
- ctx.signedHeaders = strings.Join(headers, ";")
-
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
- }
-
- headerValues := make([]string, len(headers))
- for i, k := range headers {
- if k == "host" {
- headerValues[i] = "host:" + ctx.Request.URL.Host
- } else {
- headerValues[i] = k + ":" +
- strings.Join(ctx.SignedHeaderVals[k], ",")
- }
- }
-
- ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
-}
-
-func (ctx *signingCtx) buildCanonicalString() {
- ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
-
- uri := getURIPath(ctx.Request.URL)
-
- if !ctx.DisableURIPathEscaping {
- uri = rest.EscapePath(uri, false)
- }
-
- ctx.canonicalString = strings.Join([]string{
- ctx.Request.Method,
- uri,
- ctx.Request.URL.RawQuery,
- ctx.canonicalHeaders + "\n",
- ctx.signedHeaders,
- ctx.bodyDigest,
- }, "\n")
-}
-
-func (ctx *signingCtx) buildStringToSign() {
- ctx.stringToSign = strings.Join([]string{
- authHeaderPrefix,
- ctx.formattedTime,
- ctx.credentialString,
- hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
- }, "\n")
-}
-
-func (ctx *signingCtx) buildSignature() {
- secret := ctx.credValues.SecretAccessKey
- date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
- region := makeHmac(date, []byte(ctx.Region))
- service := makeHmac(region, []byte(ctx.ServiceName))
- credentials := makeHmac(service, []byte("aws4_request"))
- signature := makeHmac(credentials, []byte(ctx.stringToSign))
- ctx.signature = hex.EncodeToString(signature)
-}
-
-func (ctx *signingCtx) buildBodyDigest() {
- hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
- if hash == "" {
- if ctx.isPresign && ctx.ServiceName == "s3" {
- hash = "UNSIGNED-PAYLOAD"
- } else if ctx.Body == nil {
- hash = emptyStringSHA256
- } else {
- hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
- }
- if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
- ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
- }
- }
- ctx.bodyDigest = hash
-}
-
-// isRequestSigned returns if the request is currently signed or presigned
-func (ctx *signingCtx) isRequestSigned() bool {
- if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
- return true
- }
- if ctx.Request.Header.Get("Authorization") != "" {
- return true
- }
-
- return false
-}
-
-// removePresign removes the presigned query string parameters so the request
-// can be re-signed.
-func (ctx *signingCtx) removePresign() {
- ctx.Query.Del("X-Amz-Algorithm")
- ctx.Query.Del("X-Amz-Signature")
- ctx.Query.Del("X-Amz-Security-Token")
- ctx.Query.Del("X-Amz-Date")
- ctx.Query.Del("X-Amz-Expires")
- ctx.Query.Del("X-Amz-Credential")
- ctx.Query.Del("X-Amz-SignedHeaders")
-}
-
-func makeHmac(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-func makeSha256(data []byte) []byte {
- hash := sha256.New()
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-func makeSha256Reader(reader io.ReadSeeker) []byte {
- hash := sha256.New()
- start, _ := reader.Seek(0, 1)
- defer reader.Seek(start, 0)
-
- io.Copy(hash, reader)
- return hash.Sum(nil)
-}
-
-const doubleSpaces = "  "
-
-var doubleSpaceBytes = []byte(doubleSpaces)
-
-func stripExcessSpaces(headerVals []string) []string {
- vals := make([]string, len(headerVals))
- for i, str := range headerVals {
- // Trim leading and trailing spaces
- trimmed := strings.TrimSpace(str)
-
- idx := strings.Index(trimmed, doubleSpaces)
- var buf []byte
- for idx > -1 {
- // Multiple adjacent spaces found
- if buf == nil {
- // first time create the buffer
- buf = []byte(trimmed)
- }
-
- stripToIdx := -1
- for j := idx + 1; j < len(buf); j++ {
- if buf[j] != ' ' {
- buf = append(buf[:idx+1], buf[j:]...)
- stripToIdx = j
- break
- }
- }
-
- if stripToIdx >= 0 {
- idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
- if idx >= 0 {
- idx += stripToIdx
- }
- } else {
- idx = -1
- }
- }
-
- if buf != nil {
- vals[i] = string(buf)
- } else {
- vals[i] = trimmed
- }
- }
- return vals
-}
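
For reference, the buildSignature and buildStringToSign functions removed above implement the standard SigV4 derivation: the canonical request is hashed into a string to sign, and the signing key is a chained HMAC over the date, region, service, and the literal "aws4_request". Below is a minimal, standard-library sketch of that HMAC chain; the date, region, service, and string-to-sign values are illustrative placeholders, not taken from this repository.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors the makeHmac helper above: HMAC-SHA256 of data under key.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Illustrative inputs only; a real signer derives these from credentials,
	// the request time, and the service/region the request targets.
	secret := "EXAMPLE-SECRET"
	dateStamp := "20230101" // short time format, YYYYMMDD
	region := "us-east-1"
	service := "sqs"
	stringToSign := "AWS4-HMAC-SHA256\n20230101T000000Z\n..." // built from the canonical request

	// The SigV4 signing key is a chained HMAC: date -> region -> service -> "aws4_request".
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(dateStamp))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
	fmt.Println("signature:", signature)
}
```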
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go
deleted file mode 100644
index cf7a9ac..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go
+++ /dev/null
@@ -1,414 +0,0 @@
-package v4
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting"
-)
-
-func TestStripExcessHeaders(t *testing.T) {
-	vals := []string{
-		"123",
-		"1 2 3",
-		" 1 2 3",
-		"1  2 3",
-		"1  23",
-		"1  2  3",
-		"1 2 ",
-		" 1 2 ",
-	}
-
- expected := []string{
- "123",
- "1 2 3",
- "1 2 3",
- "1 2 3",
- "1 23",
- "1 2 3",
- "1 2",
- "1 2",
- }
-
- newVals := stripExcessSpaces(vals)
- for i := 0; i < len(newVals); i++ {
- assert.Equal(t, expected[i], newVals[i], "test: %d", i)
- }
-}
-
-func buildRequest(serviceName, region, body string) (*http.Request, io.ReadSeeker) {
- endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
- reader := strings.NewReader(body)
- req, _ := http.NewRequest("POST", endpoint, reader)
- req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
- req.Header.Add("X-Amz-Target", "prefix.Operation")
- req.Header.Add("Content-Type", "application/x-amz-json-1.0")
- req.Header.Add("Content-Length", string(len(body)))
- req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
- req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
- req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
- return req, reader
-}
-
-func buildSigner() Signer {
- return Signer{
- Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
- }
-}
-
-func removeWS(text string) string {
- text = strings.Replace(text, " ", "", -1)
- text = strings.Replace(text, "\n", "", -1)
- text = strings.Replace(text, "\t", "", -1)
- return text
-}
-
-func assertEqual(t *testing.T, expected, given string) {
- if removeWS(expected) != removeWS(given) {
- t.Errorf("\nExpected: %s\nGiven: %s", expected, given)
- }
-}
-
-func TestPresignRequest(t *testing.T) {
- req, body := buildRequest("dynamodb", "us-east-1", "{}")
-
- signer := buildSigner()
- signer.Presign(req, body, "dynamodb", "us-east-1", 300*time.Second, time.Unix(0, 0))
-
- expectedDate := "19700101T000000Z"
- expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
- expectedSig := "ea7856749041f727690c580569738282e99c79355fe0d8f125d3b5535d2ece83"
- expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
- expectedTarget := "prefix.Operation"
-
- q := req.URL.Query()
- assert.Equal(t, expectedSig, q.Get("X-Amz-Signature"))
- assert.Equal(t, expectedCred, q.Get("X-Amz-Credential"))
- assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders"))
- assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
- assert.Empty(t, q.Get("X-Amz-Meta-Other-Header"))
- assert.Equal(t, expectedTarget, q.Get("X-Amz-Target"))
-}
-
-func TestSignRequest(t *testing.T) {
- req, body := buildRequest("dynamodb", "us-east-1", "{}")
- signer := buildSigner()
- signer.Sign(req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
-
- expectedDate := "19700101T000000Z"
- expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=ea766cabd2ec977d955a3c2bae1ae54f4515d70752f2207618396f20aa85bd21"
-
- q := req.Header
- assert.Equal(t, expectedSig, q.Get("Authorization"))
- assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
-}
-
-func TestSignBodyS3(t *testing.T) {
- req, body := buildRequest("s3", "us-east-1", "hello")
- signer := buildSigner()
- signer.Sign(req, body, "s3", "us-east-1", time.Now())
- hash := req.Header.Get("X-Amz-Content-Sha256")
- assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
-}
-
-func TestSignBodyGlacier(t *testing.T) {
- req, body := buildRequest("glacier", "us-east-1", "hello")
- signer := buildSigner()
- signer.Sign(req, body, "glacier", "us-east-1", time.Now())
- hash := req.Header.Get("X-Amz-Content-Sha256")
- assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
-}
-
-func TestPresignEmptyBodyS3(t *testing.T) {
- req, body := buildRequest("s3", "us-east-1", "hello")
- signer := buildSigner()
- signer.Presign(req, body, "s3", "us-east-1", 5*time.Minute, time.Now())
- hash := req.Header.Get("X-Amz-Content-Sha256")
- assert.Equal(t, "UNSIGNED-PAYLOAD", hash)
-}
-
-func TestSignPrecomputedBodyChecksum(t *testing.T) {
- req, body := buildRequest("dynamodb", "us-east-1", "hello")
- req.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
- signer := buildSigner()
- signer.Sign(req, body, "dynamodb", "us-east-1", time.Now())
- hash := req.Header.Get("X-Amz-Content-Sha256")
- assert.Equal(t, "PRECOMPUTED", hash)
-}
-
-func TestAnonymousCredentials(t *testing.T) {
- svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials})
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
- SignSDKRequest(r)
-
- urlQ := r.HTTPRequest.URL.Query()
- assert.Empty(t, urlQ.Get("X-Amz-Signature"))
- assert.Empty(t, urlQ.Get("X-Amz-Credential"))
- assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders"))
- assert.Empty(t, urlQ.Get("X-Amz-Date"))
-
- hQ := r.HTTPRequest.Header
- assert.Empty(t, hQ.Get("Authorization"))
- assert.Empty(t, hQ.Get("X-Amz-Date"))
-}
-
-func TestIgnoreResignRequestWithValidCreds(t *testing.T) {
- svc := awstesting.NewClient(&aws.Config{
- Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
- Region: aws.String("us-west-2"),
- })
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
-
- SignSDKRequest(r)
- sig := r.HTTPRequest.Header.Get("Authorization")
-
- signSDKRequestWithCurrTime(r, func() time.Time {
- // Simulate one second has passed so that signature's date changes
- // when it is resigned.
- return time.Now().Add(1 * time.Second)
- })
- assert.NotEqual(t, sig, r.HTTPRequest.Header.Get("Authorization"))
-}
-
-func TestIgnorePreResignRequestWithValidCreds(t *testing.T) {
- svc := awstesting.NewClient(&aws.Config{
- Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
- Region: aws.String("us-west-2"),
- })
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
- r.ExpireTime = time.Minute * 10
-
- SignSDKRequest(r)
- sig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
-
- signSDKRequestWithCurrTime(r, func() time.Time {
- // Simulate one second has passed so that signature's date changes
- // when it is resigned.
- return time.Now().Add(1 * time.Second)
- })
- assert.NotEqual(t, sig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
-}
-
-func TestResignRequestExpiredCreds(t *testing.T) {
- creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
- svc := awstesting.NewClient(&aws.Config{Credentials: creds})
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
- SignSDKRequest(r)
- querySig := r.HTTPRequest.Header.Get("Authorization")
- var origSignedHeaders string
- for _, p := range strings.Split(querySig, ", ") {
- if strings.HasPrefix(p, "SignedHeaders=") {
- origSignedHeaders = p[len("SignedHeaders="):]
- break
- }
- }
- assert.NotEmpty(t, origSignedHeaders)
- assert.NotContains(t, origSignedHeaders, "authorization")
- origSignedAt := r.LastSignedAt
-
- creds.Expire()
-
- signSDKRequestWithCurrTime(r, func() time.Time {
- // Simulate one second has passed so that signature's date changes
- // when it is resigned.
- return time.Now().Add(1 * time.Second)
- })
- updatedQuerySig := r.HTTPRequest.Header.Get("Authorization")
- assert.NotEqual(t, querySig, updatedQuerySig)
-
- var updatedSignedHeaders string
- for _, p := range strings.Split(updatedQuerySig, ", ") {
- if strings.HasPrefix(p, "SignedHeaders=") {
- updatedSignedHeaders = p[len("SignedHeaders="):]
- break
- }
- }
- assert.NotEmpty(t, updatedSignedHeaders)
- assert.NotContains(t, updatedQuerySig, "authorization")
- assert.NotEqual(t, origSignedAt, r.LastSignedAt)
-}
-
-func TestPreResignRequestExpiredCreds(t *testing.T) {
- provider := &credentials.StaticProvider{Value: credentials.Value{
- AccessKeyID: "AKID",
- SecretAccessKey: "SECRET",
- SessionToken: "SESSION",
- }}
- creds := credentials.NewCredentials(provider)
- svc := awstesting.NewClient(&aws.Config{Credentials: creds})
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
- r.ExpireTime = time.Minute * 10
-
- SignSDKRequest(r)
- querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
- signedHeaders := r.HTTPRequest.URL.Query().Get("X-Amz-SignedHeaders")
- assert.NotEmpty(t, signedHeaders)
- origSignedAt := r.LastSignedAt
-
- creds.Expire()
-
- signSDKRequestWithCurrTime(r, func() time.Time {
- // Simulate the request occurred 15 minutes in the past
-		// Simulate re-signing the request 48 hours in the past
- })
- assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
- resignedHeaders := r.HTTPRequest.URL.Query().Get("X-Amz-SignedHeaders")
- assert.Equal(t, signedHeaders, resignedHeaders)
- assert.NotContains(t, signedHeaders, "x-amz-signedHeaders")
- assert.NotEqual(t, origSignedAt, r.LastSignedAt)
-}
-
-func TestResignRequestExpiredRequest(t *testing.T) {
- creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
- svc := awstesting.NewClient(&aws.Config{Credentials: creds})
- r := svc.NewRequest(
- &request.Operation{
- Name: "BatchGetItem",
- HTTPMethod: "POST",
- HTTPPath: "/",
- },
- nil,
- nil,
- )
-
- SignSDKRequest(r)
- querySig := r.HTTPRequest.Header.Get("Authorization")
- origSignedAt := r.LastSignedAt
-
- signSDKRequestWithCurrTime(r, func() time.Time {
-		// Simulate 15 minutes having passed so the signature's date changes when re-signed
- return time.Now().Add(15 * time.Minute)
- })
- assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
- assert.NotEqual(t, origSignedAt, r.LastSignedAt)
-}
-
-func TestSignWithRequestBody(t *testing.T) {
- creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
- signer := NewSigner(creds)
-
- expectBody := []byte("abc123")
-
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- b, err := ioutil.ReadAll(r.Body)
- r.Body.Close()
- assert.NoError(t, err)
- assert.Equal(t, expectBody, b)
- w.WriteHeader(http.StatusOK)
- }))
-
- req, err := http.NewRequest("POST", server.URL, nil)
-
- _, err = signer.Sign(req, bytes.NewReader(expectBody), "service", "region", time.Now())
- assert.NoError(t, err)
-
- resp, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
-}
-
-func TestSignWithRequestBody_Overwrite(t *testing.T) {
- creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
- signer := NewSigner(creds)
-
- var expectBody []byte
-
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- b, err := ioutil.ReadAll(r.Body)
- r.Body.Close()
- assert.NoError(t, err)
- assert.Equal(t, len(expectBody), len(b))
- w.WriteHeader(http.StatusOK)
- }))
-
- req, err := http.NewRequest("GET", server.URL, strings.NewReader("invalid body"))
-
- _, err = signer.Sign(req, nil, "service", "region", time.Now())
- req.ContentLength = 0
-
- assert.NoError(t, err)
-
- resp, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
-}
-
-func BenchmarkPresignRequest(b *testing.B) {
- signer := buildSigner()
- req, body := buildRequest("dynamodb", "us-east-1", "{}")
- for i := 0; i < b.N; i++ {
- signer.Presign(req, body, "dynamodb", "us-east-1", 300*time.Second, time.Now())
- }
-}
-
-func BenchmarkSignRequest(b *testing.B) {
- signer := buildSigner()
- req, body := buildRequest("dynamodb", "us-east-1", "{}")
- for i := 0; i < b.N; i++ {
- signer.Sign(req, body, "dynamodb", "us-east-1", time.Now())
- }
-}
-
-func BenchmarkStripExcessSpaces(b *testing.B) {
- vals := []string{
- `AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
- `123 321 123 321`,
- ` 123 321 123 321 `,
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stripExcessSpaces(vals)
- }
-}
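
The deleted tests above call the Signer directly. As a usage reference, presigning a standalone request with this (now removed) vendored package looked roughly like the sketch below; it assumes the pre-modules vendor import paths used elsewhere in this repository and uses static placeholder credentials.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Static placeholder credentials for illustration only.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
	signer := v4.NewSigner(creds)

	req, err := http.NewRequest("GET", "https://sqs.us-east-1.amazonaws.com/", nil)
	if err != nil {
		panic(err)
	}

	// Presign moves the auth values into the query string so the URL can be
	// shared; here it is valid for 15 minutes from the signing time.
	if _, err := signer.Presign(req, nil, "sqs", "us-east-1", 15*time.Minute, time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.URL.String())
}
```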
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
deleted file mode 100644
index fa014b4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package aws
-
-import (
- "io"
- "sync"
-)
-
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
-func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
- return ReaderSeekerCloser{r}
-}
-
-// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
-// io.Closer interfaces to the underlying object if they are available.
-type ReaderSeekerCloser struct {
- r io.Reader
-}
-
-// Read reads from the underlying reader, up to len(p) bytes. It returns the
-// number of bytes read and any error encountered.
-//
-// If the underlying value is not an io.Reader, zero bytes and a nil error are returned.
-//
-// Performs the same functionality as io.Reader's Read.
-func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
- switch t := r.r.(type) {
- case io.Reader:
- return t.Read(p)
- }
- return 0, nil
-}
-
-// Seek sets the offset for the next Read to offset, interpreted according to
-// whence: 0 means relative to the origin of the file, 1 means relative to the
-// current offset, and 2 means relative to the end. Seek returns the new offset
-// and an error, if any.
-//
-// If the underlying reader is not an io.Seeker, nothing is done.
-func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
- switch t := r.r.(type) {
- case io.Seeker:
- return t.Seek(offset, whence)
- }
- return int64(0), nil
-}
-
-// Close closes the ReaderSeekerCloser.
-//
-// If the underlying reader is not an io.Closer, nothing is done.
-func (r ReaderSeekerCloser) Close() error {
- switch t := r.r.(type) {
- case io.Closer:
- return t.Close()
- }
- return nil
-}
-
-// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
-// interface. It can be used with the s3manager.Downloader to download content
-// to a buffer in memory, and is safe to use concurrently.
-type WriteAtBuffer struct {
- buf []byte
- m sync.Mutex
-
- // GrowthCoeff defines the growth rate of the internal buffer. By
- // default, the growth rate is 1, where expanding the internal
- // buffer will allocate only enough capacity to fit the new expected
- // length.
- GrowthCoeff float64
-}
-
-// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
-// provided by buf.
-func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
- return &WriteAtBuffer{buf: buf}
-}
-
-// WriteAt writes a slice of bytes to the buffer starting at the position
-// provided. It returns the number of bytes written and any error. Overlapping
-// writes overwrite previously written bytes.
-func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
- pLen := len(p)
- expLen := pos + int64(pLen)
- b.m.Lock()
- defer b.m.Unlock()
- if int64(len(b.buf)) < expLen {
- if int64(cap(b.buf)) < expLen {
- if b.GrowthCoeff < 1 {
- b.GrowthCoeff = 1
- }
- newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
- copy(newBuf, b.buf)
- b.buf = newBuf
- }
- b.buf = b.buf[:expLen]
- }
- copy(b.buf[pos:], p)
- return pLen, nil
-}
-
-// Bytes returns a slice of bytes written to the buffer.
-func (b *WriteAtBuffer) Bytes() []byte {
- b.m.Lock()
- defer b.m.Unlock()
- return b.buf[:len(b.buf):len(b.buf)]
-}
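
WriteAtBuffer, removed above, is a small grow-on-demand io.WriterAt: chunks may arrive out of order (as s3manager.Downloader produces them with ranged GETs) and land at their final offsets. A short illustrative sketch of that behavior, assuming the same vendored aws import path this PR removes:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Writes may arrive out of order; the buffer grows to fit the furthest
	// offset written, zero-filling any gaps until they are written.
	buf := aws.NewWriteAtBuffer(nil)

	if _, err := buf.WriteAt([]byte("world"), 6); err != nil {
		panic(err)
	}
	if _, err := buf.WriteAt([]byte("hello "), 0); err != nil {
		panic(err)
	}

	fmt.Printf("%q\n", buf.Bytes()) // "hello world"
}
```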
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
deleted file mode 100644
index a7cd93b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package aws
-
-import (
- "math/rand"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestWriteAtBuffer(t *testing.T) {
- b := &WriteAtBuffer{}
-
- n, err := b.WriteAt([]byte{1}, 0)
- assert.NoError(t, err)
- assert.Equal(t, 1, n)
-
- n, err = b.WriteAt([]byte{1, 1, 1}, 5)
- assert.NoError(t, err)
- assert.Equal(t, 3, n)
-
- n, err = b.WriteAt([]byte{2}, 1)
- assert.NoError(t, err)
- assert.Equal(t, 1, n)
-
- n, err = b.WriteAt([]byte{3}, 2)
- assert.NoError(t, err)
- assert.Equal(t, 1, n)
-
- assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
-}
-
-func BenchmarkWriteAtBuffer(b *testing.B) {
- buf := &WriteAtBuffer{}
- r := rand.New(rand.NewSource(1))
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- to := r.Intn(10) * 4096
- bs := make([]byte, to)
- buf.WriteAt(bs, r.Int63n(10)*4096)
- }
-}
-
-func BenchmarkWriteAtBufferOrderedWrites(b *testing.B) {
- // test the performance of a WriteAtBuffer when written in an
-	// ordered fashion. This is similar to the behavior of the
-	// s3manager.Downloader, since it downloads the first chunk of the file,
-	// then the second, and so on.
- //
- // This test simulates a 150MB file being written in 30 ordered 5MB chunks.
- chunk := int64(5e6)
- max := chunk * 30
- // we'll write the same 5MB chunk every time
- tmp := make([]byte, chunk)
- for i := 0; i < b.N; i++ {
- buf := &WriteAtBuffer{}
- for i := int64(0); i < max; i += chunk {
- buf.WriteAt(tmp, i)
- }
- }
-}
-
-func BenchmarkWriteAtBufferParallel(b *testing.B) {
- buf := &WriteAtBuffer{}
- r := rand.New(rand.NewSource(1))
-
- b.ResetTimer()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- to := r.Intn(10) * 4096
- bs := make([]byte, to)
- buf.WriteAt(bs, r.Int63n(10)*4096)
- }
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
deleted file mode 100644
index b01cd70..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package aws provides core functionality for making requests to AWS services.
-package aws
-
-// SDKName is the name of this AWS SDK
-const SDKName = "aws-sdk-go"
-
-// SDKVersion is the version of this SDK
-const SDKVersion = "1.4.22"
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json
deleted file mode 100644
index 65d753c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "ImportPath": "github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer",
- "GoVersion": "go1.6",
- "GodepVersion": "v60",
- "Deps": [
- {
- "ImportPath": "golang.org/x/tools/go/ast/astutil",
- "Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
- },
- {
- "ImportPath": "golang.org/x/tools/go/buildutil",
- "Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
- },
- {
- "ImportPath": "golang.org/x/tools/go/loader",
- "Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
- }
- ]
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go
deleted file mode 100644
index 286d4bf..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// +build go1.5,deprecated
-
-package main
-
-import (
- "bytes"
- "go/format"
- "io"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "text/template"
-
- "github.com/aws/aws-sdk-go/private/model/api"
-)
-
-type pkg struct {
- oldAPI *api.API
- newAPI *api.API
- shapes map[string]*shapentry
- operations map[string]*opentry
-}
-
-type shapentry struct {
- oldShape *api.Shape
- newShape *api.Shape
-}
-
-type opentry struct {
- oldName string
- newName string
-}
-
-type packageRenames struct {
- Shapes map[string]string
- Operations map[string]string
- Fields map[string]string
-}
-
-var exportMap = map[string]*packageRenames{}
-
-func generateRenames(w io.Writer) error {
- tmpl, err := template.New("renames").Parse(t)
- if err != nil {
- return err
- }
-
- out := bytes.NewBuffer(nil)
- if err = tmpl.Execute(out, exportMap); err != nil {
- return err
- }
-
- b, err := format.Source(bytes.TrimSpace(out.Bytes()))
- if err != nil {
- return err
- }
-
- _, err = io.Copy(w, bytes.NewReader(b))
- return err
-}
-
-const t = `
-package rename
-
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
-
-var renamedPackages = map[string]*packageRenames{
- {{ range $key, $entry := . }}"{{ $key }}": &packageRenames{
- operations: map[string]string{
- {{ range $old, $new := $entry.Operations }}"{{ $old }}": "{{ $new }}",
- {{ end }}
- },
- shapes: map[string]string{
- {{ range $old, $new := $entry.Shapes }}"{{ $old }}": "{{ $new }}",
- {{ end }}
- },
- fields: map[string]string{
- {{ range $old, $new := $entry.Fields }}"{{ $old }}": "{{ $new }}",
- {{ end }}
- },
- },
- {{ end }}
-}
-`
-
-func (p *pkg) buildRenames() {
- pkgName := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName()
- if exportMap[pkgName] == nil {
- exportMap[pkgName] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}}
- }
- ifacename := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName() + "/" +
- p.oldAPI.InterfacePackageName()
- if exportMap[ifacename] == nil {
- exportMap[ifacename] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}}
- }
-
- for _, entry := range p.operations {
- if entry.oldName != entry.newName {
- pkgNames := []string{pkgName, ifacename}
- for _, p := range pkgNames {
- exportMap[p].Operations[entry.oldName] = entry.newName
- exportMap[p].Operations[entry.oldName+"Request"] = entry.newName + "Request"
- exportMap[p].Operations[entry.oldName+"Pages"] = entry.newName + "Pages"
- }
- }
- }
-
- for _, entry := range p.shapes {
- if entry.oldShape.Type == "structure" {
- if entry.oldShape.ShapeName != entry.newShape.ShapeName {
- exportMap[pkgName].Shapes[entry.oldShape.ShapeName] = entry.newShape.ShapeName
- }
-
- for _, n := range entry.oldShape.MemberNames() {
- for _, m := range entry.newShape.MemberNames() {
- if n != m && strings.ToLower(n) == strings.ToLower(m) {
- exportMap[pkgName].Fields[n] = m
- }
- }
- }
- }
- }
-}
-
-func load(file string) *pkg {
- p := &pkg{&api.API{}, &api.API{}, map[string]*shapentry{}, map[string]*opentry{}}
-
- p.oldAPI.Attach(file)
- p.oldAPI.Setup()
-
- p.newAPI.Attach(file)
- p.newAPI.Setup()
-
- for _, name := range p.oldAPI.OperationNames() {
- p.operations[strings.ToLower(name)] = &opentry{oldName: name}
- }
-
- for _, name := range p.newAPI.OperationNames() {
- p.operations[strings.ToLower(name)].newName = name
- }
-
- for _, shape := range p.oldAPI.ShapeList() {
- p.shapes[strings.ToLower(shape.ShapeName)] = &shapentry{oldShape: shape}
- }
-
- for _, shape := range p.newAPI.ShapeList() {
- if _, ok := p.shapes[strings.ToLower(shape.ShapeName)]; !ok {
- panic("missing shape " + shape.ShapeName)
- }
- p.shapes[strings.ToLower(shape.ShapeName)].newShape = shape
- }
-
- return p
-}
-
-var excludeServices = map[string]struct{}{
- "simpledb": {},
- "importexport": {},
-}
-
-func main() {
- files, _ := filepath.Glob("../../apis/*/*/api-2.json")
-
- sort.Strings(files)
-
- // Remove old API versions from list
- m := map[string]bool{}
- for i := range files {
- idx := len(files) - 1 - i
- parts := strings.Split(files[idx], string(filepath.Separator))
-		svc := parts[len(parts)-3] // service name is the 3rd-to-last path component
-
- if m[svc] {
- files[idx] = "" // wipe this one out if we already saw the service
- }
- m[svc] = true
- }
-
- for i := range files {
- file := files[i]
-		if file == "" { // skipped above as an older API version
- continue
- }
-
- if g := load(file); g != nil {
- if _, ok := excludeServices[g.oldAPI.PackageName()]; !ok {
- g.buildRenames()
- }
- }
- }
-
- outfile, err := os.Create("rename/renames.go")
- if err != nil {
- panic(err)
- }
- if err := generateRenames(outfile); err != nil {
- panic(err)
- }
-}
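
The generator above follows a common Go codegen pattern: render source with text/template, then run it through go/format so the output is guaranteed to be gofmt-clean and syntactically valid before it is written out. A self-contained, standard-library-only sketch of the same pattern (the template and the single rename entry are illustrative, not the generator's real template):

```go
package main

import (
	"bytes"
	"fmt"
	"go/format"
	"text/template"
)

// src is a toy template that emits a Go map literal from the supplied data.
const src = `package rename

// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

var renamed = map[string]string{
{{ range $old, $new := . }}	"{{ $old }}": "{{ $new }}",
{{ end }}}
`

func main() {
	tmpl := template.Must(template.New("renames").Parse(src))

	var out bytes.Buffer
	if err := tmpl.Execute(&out, map[string]string{"InstanceID": "InstanceId"}); err != nil {
		panic(err)
	}

	// go/format fixes up whitespace and guarantees the output parses as Go.
	formatted, err := format.Source(out.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(formatted))
}
```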
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go
deleted file mode 100644
index 234ae7c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// +build go1.5,deprecated
-
-package rename
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "go/parser"
- "go/token"
- "go/types"
- "io/ioutil"
-
- "golang.org/x/tools/go/loader"
-)
-
-var dryRun = flag.Bool("dryrun", false, "Dry run")
-var verbose = flag.Bool("verbose", false, "Verbose")
-
-type packageRenames struct {
- operations map[string]string
- shapes map[string]string
- fields map[string]string
-}
-
-type renamer struct {
- *loader.Program
- files map[*token.File]bool
-}
-
-// ParsePathsFromArgs parses arguments from command line and looks at import
-// paths to rename objects.
-func ParsePathsFromArgs() {
- flag.Parse()
- for _, dir := range flag.Args() {
- var conf loader.Config
- conf.ParserMode = parser.ParseComments
- conf.ImportWithTests(dir)
- prog, err := conf.Load()
- if err != nil {
- panic(err)
- }
-
- r := renamer{prog, map[*token.File]bool{}}
- r.parse()
- if !*dryRun {
- r.write()
- }
- }
-}
-
-func (r *renamer) dryInfo() string {
- if *dryRun {
- return "[DRY-RUN]"
- }
- return "[!]"
-}
-
-func (r *renamer) printf(msg string, args ...interface{}) {
- if *verbose {
- fmt.Printf(msg, args...)
- }
-}
-
-func (r *renamer) parse() {
- for _, pkg := range r.InitialPackages() {
- r.parseUses(pkg)
- }
-}
-
-func (r *renamer) write() {
- for _, pkg := range r.InitialPackages() {
- for _, f := range pkg.Files {
- tokenFile := r.Fset.File(f.Pos())
- if r.files[tokenFile] {
- var buf bytes.Buffer
- format.Node(&buf, r.Fset, f)
- if err := ioutil.WriteFile(tokenFile.Name(), buf.Bytes(), 0644); err != nil {
- panic(err)
- }
- }
- }
- }
-}
-
-func (r *renamer) parseUses(pkg *loader.PackageInfo) {
- for k, v := range pkg.Uses {
- if v.Pkg() != nil {
- pkgPath := v.Pkg().Path()
- if renames, ok := renamedPackages[pkgPath]; ok {
- name := k.Name
- switch t := v.(type) {
- case *types.Func:
- if newName, ok := renames.operations[t.Name()]; ok && newName != name {
- r.printf("%s Rename [OPERATION]: %q -> %q\n", r.dryInfo(), name, newName)
- r.files[r.Fset.File(k.Pos())] = true
- k.Name = newName
- }
- case *types.TypeName:
- if newName, ok := renames.shapes[name]; ok && newName != name {
- r.printf("%s Rename [SHAPE]: %q -> %q\n", r.dryInfo(), t.Name(), newName)
- r.files[r.Fset.File(k.Pos())] = true
- k.Name = newName
- }
- case *types.Var:
- if newName, ok := renames.fields[name]; ok && newName != name {
- r.printf("%s Rename [FIELD]: %q -> %q\n", r.dryInfo(), t.Name(), newName)
- r.files[r.Fset.File(k.Pos())] = true
- k.Name = newName
- }
- }
- }
- }
- }
-}
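
The renamer above uses golang.org/x/tools/go/loader so it can consult type information and only rewrite uses that resolve to the SDK's packages. As a rough, syntax-only illustration of the rewrite step itself, the standard-library sketch below renames purely by identifier spelling (which the real tool deliberately does not do) and reprints the file with go/format:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

// renames maps old identifier spellings to new ones; illustrative only.
var renames = map[string]string{"InstanceID": "InstanceId"}

func main() {
	src := `package demo

func use(i Instance) string { return i.InstanceID }

type Instance struct{ InstanceID string }
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Walk every identifier and rewrite it in place when it matches a rename.
	// (The real renamer checks that the identifier resolves to an SDK package
	// before touching it; this sketch skips that type-aware filtering.)
	ast.Inspect(file, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			if newName, ok := renames[id.Name]; ok {
				id.Name = newName
			}
		}
		return true
	})

	if err := format.Node(os.Stdout, fset, file); err != nil {
		panic(err)
	}
	fmt.Println()
}
```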
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go
deleted file mode 100644
index 55c7b4e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go
+++ /dev/null
@@ -1,2120 +0,0 @@
-// +build go1.5,deprecated
-
-package rename
-
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
-
-var renamedPackages = map[string]*packageRenames{
- "github.com/aws/aws-sdk-go/service/autoscaling": {
- operations: map[string]string{},
- shapes: map[string]string{
- "EBS": "Ebs",
- },
- fields: map[string]string{
- "ActivityID": "ActivityId",
- "ActivityIDs": "ActivityIds",
- "AssociatePublicIPAddress": "AssociatePublicIpAddress",
- "ClassicLinkVPCID": "ClassicLinkVPCId",
- "EBS": "Ebs",
- "EBSOptimized": "EbsOptimized",
- "IAMInstanceProfile": "IamInstanceProfile",
- "IOPS": "Iops",
- "ImageID": "ImageId",
- "InstanceID": "InstanceId",
- "InstanceIDs": "InstanceIds",
- "KernelID": "KernelId",
- "RAMDiskID": "RamdiskId",
- "ResourceID": "ResourceId",
- "SnapshotID": "SnapshotId",
- },
- },
- "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudformation": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "EventID": "EventId",
- "LogicalResourceID": "LogicalResourceId",
- "PhysicalResourceID": "PhysicalResourceId",
- "StackID": "StackId",
- "URL": "Url",
- "UniqueID": "UniqueId",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudfront": {
- operations: map[string]string{},
- shapes: map[string]string{
- "KeyPairIDs": "KeyPairIds",
- },
- fields: map[string]string{
- "AWSAccountNumber": "AwsAccountNumber",
- "DistributionID": "DistributionId",
- "IAMCertificateID": "IAMCertificateId",
- "ID": "Id",
- "KeyPairIDs": "KeyPairIds",
- "S3CanonicalUserID": "S3CanonicalUserId",
- "TargetOriginID": "TargetOriginId",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudhsm": {
- operations: map[string]string{
- "CreateHAPG": "CreateHapg",
- "CreateHAPGPages": "CreateHapgPages",
- "CreateHAPGRequest": "CreateHapgRequest",
- "CreateHSM": "CreateHsm",
- "CreateHSMPages": "CreateHsmPages",
- "CreateHSMRequest": "CreateHsmRequest",
- "DeleteHAPG": "DeleteHapg",
- "DeleteHAPGPages": "DeleteHapgPages",
- "DeleteHAPGRequest": "DeleteHapgRequest",
- "DeleteHSM": "DeleteHsm",
- "DeleteHSMPages": "DeleteHsmPages",
- "DeleteHSMRequest": "DeleteHsmRequest",
- "DescribeHAPG": "DescribeHapg",
- "DescribeHAPGPages": "DescribeHapgPages",
- "DescribeHAPGRequest": "DescribeHapgRequest",
- "DescribeHSM": "DescribeHsm",
- "DescribeHSMPages": "DescribeHsmPages",
- "DescribeHSMRequest": "DescribeHsmRequest",
- "ListHSMs": "ListHsms",
- "ListHSMsPages": "ListHsmsPages",
- "ListHSMsRequest": "ListHsmsRequest",
- "ModifyHAPG": "ModifyHapg",
- "ModifyHAPGPages": "ModifyHapgPages",
- "ModifyHAPGRequest": "ModifyHapgRequest",
- "ModifyHSM": "ModifyHsm",
- "ModifyHSMPages": "ModifyHsmPages",
- "ModifyHSMRequest": "ModifyHsmRequest",
- },
- shapes: map[string]string{
- "CreateHAPGInput": "CreateHapgInput",
- "CreateHAPGOutput": "CreateHapgOutput",
- "CreateHSMInput": "CreateHsmInput",
- "CreateHSMOutput": "CreateHsmOutput",
- "DeleteHAPGInput": "DeleteHapgInput",
- "DeleteHAPGOutput": "DeleteHapgOutput",
- "DeleteHSMInput": "DeleteHsmInput",
- "DeleteHSMOutput": "DeleteHsmOutput",
- "DescribeHAPGInput": "DescribeHapgInput",
- "DescribeHAPGOutput": "DescribeHapgOutput",
- "DescribeHSMInput": "DescribeHsmInput",
- "DescribeHSMOutput": "DescribeHsmOutput",
- "ListHSMsInput": "ListHsmsInput",
- "ListHSMsOutput": "ListHsmsOutput",
- "ModifyHAPGInput": "ModifyHapgInput",
- "ModifyHAPGOutput": "ModifyHapgOutput",
- "ModifyHSMInput": "ModifyHsmInput",
- "ModifyHSMOutput": "ModifyHsmOutput",
- },
- fields: map[string]string{
- "ClientARN": "ClientArn",
- "ENIID": "EniId",
- "ENIIP": "EniIp",
- "ExternalID": "ExternalId",
- "HAPGARN": "HapgArn",
- "HAPGList": "HapgList",
- "HAPGSerial": "HapgSerial",
- "HSMARN": "HsmArn",
- "HSMList": "HsmList",
- "HSMSerialNumber": "HsmSerialNumber",
- "HSMType": "HsmType",
- "HSMsLastActionFailed": "HsmsLastActionFailed",
- "HSMsPendingDeletion": "HsmsPendingDeletion",
- "HSMsPendingRegistration": "HsmsPendingRegistration",
- "IAMRoleARN": "IamRoleArn",
- "SSHKey": "SshKey",
- "SSHKeyLastUpdated": "SshKeyLastUpdated",
- "SSHPublicKey": "SshPublicKey",
- "ServerCertURI": "ServerCertUri",
- "SubnetID": "SubnetId",
- "SyslogIP": "SyslogIp",
- "VPCID": "VpcId",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface": {
- operations: map[string]string{
- "CreateHAPG": "CreateHapg",
- "CreateHAPGPages": "CreateHapgPages",
- "CreateHAPGRequest": "CreateHapgRequest",
- "CreateHSM": "CreateHsm",
- "CreateHSMPages": "CreateHsmPages",
- "CreateHSMRequest": "CreateHsmRequest",
- "DeleteHAPG": "DeleteHapg",
- "DeleteHAPGPages": "DeleteHapgPages",
- "DeleteHAPGRequest": "DeleteHapgRequest",
- "DeleteHSM": "DeleteHsm",
- "DeleteHSMPages": "DeleteHsmPages",
- "DeleteHSMRequest": "DeleteHsmRequest",
- "DescribeHAPG": "DescribeHapg",
- "DescribeHAPGPages": "DescribeHapgPages",
- "DescribeHAPGRequest": "DescribeHapgRequest",
- "DescribeHSM": "DescribeHsm",
- "DescribeHSMPages": "DescribeHsmPages",
- "DescribeHSMRequest": "DescribeHsmRequest",
- "ListHSMs": "ListHsms",
- "ListHSMsPages": "ListHsmsPages",
- "ListHSMsRequest": "ListHsmsRequest",
- "ModifyHAPG": "ModifyHapg",
- "ModifyHAPGPages": "ModifyHapgPages",
- "ModifyHAPGRequest": "ModifyHapgRequest",
- "ModifyHSM": "ModifyHsm",
- "ModifyHSMPages": "ModifyHsmPages",
- "ModifyHSMRequest": "ModifyHsmRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudsearch": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "DomainID": "DomainId",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudsearchdomain": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ID": "Id",
- "RID": "Rid",
- "TimeMS": "Timems",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudtrail": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "CloudWatchLogsLogGroupARN": "CloudWatchLogsLogGroupArn",
- "CloudWatchLogsRoleARN": "CloudWatchLogsRoleArn",
- "EventID": "EventId",
- "SNSTopicName": "SnsTopicName",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudwatch": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AlarmARN": "AlarmArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cloudwatchlogs": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "DestinationARN": "DestinationArn",
- "EventID": "EventId",
- "RoleARN": "RoleArn",
- "TargetARN": "TargetArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/codecommit": {
- operations: map[string]string{},
- shapes: map[string]string{
- "RepositoryNameIDPair": "RepositoryNameIdPair",
- },
- fields: map[string]string{
- "ARN": "Arn",
- "AccountID": "AccountId",
- "CloneURLHTTP": "CloneUrlHttp",
- "CloneURLSSH": "CloneUrlSsh",
- "CommitID": "CommitId",
- "RepositoryID": "RepositoryId",
- },
- },
- "github.com/aws/aws-sdk-go/service/codecommit/codecommitiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/codedeploy": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ApplicationID": "ApplicationId",
- "CommitID": "CommitId",
- "DeploymentConfigID": "DeploymentConfigId",
- "DeploymentGroupID": "DeploymentGroupId",
- "DeploymentID": "DeploymentId",
- "DeploymentIDs": "DeploymentIds",
- "EC2TagFilters": "Ec2TagFilters",
- "IAMUserARN": "IamUserArn",
- "InstanceARN": "InstanceArn",
- "InstanceID": "InstanceId",
- "ServiceRoleARN": "ServiceRoleArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/codepipeline": {
- operations: map[string]string{},
- shapes: map[string]string{
- "ActionTypeID": "ActionTypeId",
- },
- fields: map[string]string{
- "AccessKeyID": "AccessKeyId",
- "AccountID": "AccountId",
- "ActionTypeID": "ActionTypeId",
- "ClientID": "ClientId",
- "EntityURL": "EntityUrl",
- "EntityURLTemplate": "EntityUrlTemplate",
- "ExecutionURLTemplate": "ExecutionUrlTemplate",
- "ExternalExecutionID": "ExternalExecutionId",
- "ExternalExecutionURL": "ExternalExecutionUrl",
- "ID": "Id",
- "JobID": "JobId",
- "PipelineExecutionID": "PipelineExecutionId",
- "RevisionChangeID": "RevisionChangeId",
- "RevisionID": "RevisionId",
- "RevisionURL": "RevisionUrl",
- "RevisionURLTemplate": "RevisionUrlTemplate",
- "RoleARN": "RoleArn",
- "ThirdPartyConfigurationURL": "ThirdPartyConfigurationUrl",
- },
- },
- "github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cognitoidentity": {
- operations: map[string]string{
- "GetID": "GetId",
- "GetIDPages": "GetIdPages",
- "GetIDRequest": "GetIdRequest",
- "GetOpenIDToken": "GetOpenIdToken",
- "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity",
- "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages",
- "GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest",
- "GetOpenIDTokenPages": "GetOpenIdTokenPages",
- "GetOpenIDTokenRequest": "GetOpenIdTokenRequest",
- },
- shapes: map[string]string{
- "GetIDInput": "GetIdInput",
- "GetIDOutput": "GetIdOutput",
- "GetOpenIDTokenForDeveloperIdentityInput": "GetOpenIdTokenForDeveloperIdentityInput",
- "GetOpenIDTokenForDeveloperIdentityOutput": "GetOpenIdTokenForDeveloperIdentityOutput",
- "GetOpenIDTokenInput": "GetOpenIdTokenInput",
- "GetOpenIDTokenOutput": "GetOpenIdTokenOutput",
- "UnprocessedIdentityID": "UnprocessedIdentityId",
- },
- fields: map[string]string{
- "AccessKeyID": "AccessKeyId",
- "AccountID": "AccountId",
- "IdentityID": "IdentityId",
- "IdentityIDsToDelete": "IdentityIdsToDelete",
- "IdentityPoolID": "IdentityPoolId",
- "OpenIDConnectProviderARNs": "OpenIdConnectProviderARNs",
- "UnprocessedIdentityIDs": "UnprocessedIdentityIds",
- },
- },
- "github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface": {
- operations: map[string]string{
- "GetID": "GetId",
- "GetIDPages": "GetIdPages",
- "GetIDRequest": "GetIdRequest",
- "GetOpenIDToken": "GetOpenIdToken",
- "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity",
- "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages",
- "GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest",
- "GetOpenIDTokenPages": "GetOpenIdTokenPages",
- "GetOpenIDTokenRequest": "GetOpenIdTokenRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/cognitosync": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ApplicationARNs": "ApplicationArns",
- "DeviceID": "DeviceId",
- "IdentityID": "IdentityId",
- "IdentityPoolID": "IdentityPoolId",
- "RoleARN": "RoleArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/configservice": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "AccountID": "AccountId",
- "ConfigSnapshotID": "ConfigSnapshotId",
- "ConfigurationStateID": "ConfigurationStateId",
- "ResourceID": "ResourceId",
- "SNSTopicARN": "SnsTopicARN",
- },
- },
- "github.com/aws/aws-sdk-go/service/configservice/configserviceiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/datapipeline": {
- operations: map[string]string{},
- shapes: map[string]string{
- "PipelineIDName": "PipelineIdName",
- },
- fields: map[string]string{
- "AttemptID": "AttemptId",
- "ErrorID": "ErrorId",
- "ID": "Id",
- "IDs": "Ids",
- "ObjectID": "ObjectId",
- "ObjectIDs": "ObjectIds",
- "PipelineID": "PipelineId",
- "PipelineIDList": "PipelineIdList",
- "PipelineIDs": "PipelineIds",
- "TaskID": "TaskId",
- "TaskRunnerID": "TaskrunnerId",
- "UniqueID": "UniqueId",
- },
- },
- "github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/devicefarm": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "AWSAccountNumber": "AwsAccountNumber",
- "AppARN": "AppArn",
- "CPU": "Cpu",
- "DevicePoolARN": "DevicePoolArn",
- "ExtraDataPackageARN": "ExtraDataPackageArn",
- "NetworkProfileARN": "NetworkProfileArn",
- "ProjectARN": "ProjectArn",
- "TestPackageARN": "TestPackageArn",
- "URL": "Url",
- },
- },
- "github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/directconnect": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ASN": "Asn",
- "CIDR": "Cidr",
- "ConnectionID": "ConnectionId",
- "InterconnectID": "InterconnectId",
- "VLAN": "Vlan",
- "VirtualGatewayID": "VirtualGatewayId",
- "VirtualInterfaceID": "VirtualInterfaceId",
- },
- },
- "github.com/aws/aws-sdk-go/service/directconnect/directconnectiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/directoryservice": {
- operations: map[string]string{
- "DisableSSO": "DisableSso",
- "DisableSSOPages": "DisableSsoPages",
- "DisableSSORequest": "DisableSsoRequest",
- "EnableSSO": "EnableSso",
- "EnableSSOPages": "EnableSsoPages",
- "EnableSSORequest": "EnableSsoRequest",
- },
- shapes: map[string]string{
- "DirectoryVPCSettings": "DirectoryVpcSettings",
- "DirectoryVPCSettingsDescription": "DirectoryVpcSettingsDescription",
- "DisableSSOInput": "DisableSsoInput",
- "DisableSSOOutput": "DisableSsoOutput",
- "EnableSSOInput": "EnableSsoInput",
- "EnableSSOOutput": "EnableSsoOutput",
- },
- fields: map[string]string{
- "AccessURL": "AccessUrl",
- "ComputerID": "ComputerId",
- "ConnectIPs": "ConnectIps",
- "CustomerDNSIPs": "CustomerDnsIps",
- "DNSIPAddrs": "DnsIpAddrs",
- "DirectoryID": "DirectoryId",
- "DirectoryIDs": "DirectoryIds",
- "SSOEnabled": "SsoEnabled",
- "SecurityGroupID": "SecurityGroupId",
- "SnapshotID": "SnapshotId",
- "SnapshotIDs": "SnapshotIds",
- "SubnetIDs": "SubnetIds",
- "VPCID": "VpcId",
- "VPCSettings": "VpcSettings",
- },
- },
- "github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface": {
- operations: map[string]string{
- "DisableSSO": "DisableSso",
- "DisableSSOPages": "DisableSsoPages",
- "DisableSSORequest": "DisableSsoRequest",
- "EnableSSO": "EnableSso",
- "EnableSSOPages": "EnableSsoPages",
- "EnableSSORequest": "EnableSsoRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/dynamodb": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "IndexARN": "IndexArn",
- "LatestStreamARN": "LatestStreamArn",
- "TableARN": "TableArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/dynamodbstreams": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AWSRegion": "AwsRegion",
- "DynamoDB": "Dynamodb",
- "ExclusiveStartShardID": "ExclusiveStartShardId",
- "ExclusiveStartStreamARN": "ExclusiveStartStreamArn",
- "LastEvaluatedShardID": "LastEvaluatedShardId",
- "LastEvaluatedStreamARN": "LastEvaluatedStreamArn",
- "ParentShardID": "ParentShardId",
- "ShardID": "ShardId",
- "StreamARN": "StreamArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/ec2": {
- operations: map[string]string{
- "AcceptVPCPeeringConnection": "AcceptVpcPeeringConnection",
- "AcceptVPCPeeringConnectionPages": "AcceptVpcPeeringConnectionPages",
- "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest",
- "AssignPrivateIPAddresses": "AssignPrivateIpAddresses",
- "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages",
- "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest",
- "AssociateDHCPOptions": "AssociateDhcpOptions",
- "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages",
- "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest",
- "AttachClassicLinkVPC": "AttachClassicLinkVpc",
- "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages",
- "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest",
- "AttachVPNGateway": "AttachVpnGateway",
- "AttachVPNGatewayPages": "AttachVpnGatewayPages",
- "AttachVPNGatewayRequest": "AttachVpnGatewayRequest",
- "CreateDHCPOptions": "CreateDhcpOptions",
- "CreateDHCPOptionsPages": "CreateDhcpOptionsPages",
- "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest",
- "CreateNetworkACL": "CreateNetworkAcl",
- "CreateNetworkACLEntry": "CreateNetworkAclEntry",
- "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages",
- "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest",
- "CreateNetworkACLPages": "CreateNetworkAclPages",
- "CreateNetworkACLRequest": "CreateNetworkAclRequest",
- "CreateVPC": "CreateVpc",
- "CreateVPCEndpoint": "CreateVpcEndpoint",
- "CreateVPCEndpointPages": "CreateVpcEndpointPages",
- "CreateVPCEndpointRequest": "CreateVpcEndpointRequest",
- "CreateVPCPages": "CreateVpcPages",
- "CreateVPCPeeringConnection": "CreateVpcPeeringConnection",
- "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages",
- "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest",
- "CreateVPCRequest": "CreateVpcRequest",
- "CreateVPNConnection": "CreateVpnConnection",
- "CreateVPNConnectionPages": "CreateVpnConnectionPages",
- "CreateVPNConnectionRequest": "CreateVpnConnectionRequest",
- "CreateVPNConnectionRoute": "CreateVpnConnectionRoute",
- "CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages",
- "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest",
- "CreateVPNGateway": "CreateVpnGateway",
- "CreateVPNGatewayPages": "CreateVpnGatewayPages",
- "CreateVPNGatewayRequest": "CreateVpnGatewayRequest",
- "DeleteDHCPOptions": "DeleteDhcpOptions",
- "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages",
- "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest",
- "DeleteNetworkACL": "DeleteNetworkAcl",
- "DeleteNetworkACLEntry": "DeleteNetworkAclEntry",
- "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages",
- "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest",
- "DeleteNetworkACLPages": "DeleteNetworkAclPages",
- "DeleteNetworkACLRequest": "DeleteNetworkAclRequest",
- "DeleteVPC": "DeleteVpc",
- "DeleteVPCEndpoints": "DeleteVpcEndpoints",
- "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages",
- "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest",
- "DeleteVPCPages": "DeleteVpcPages",
- "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection",
- "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages",
- "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest",
- "DeleteVPCRequest": "DeleteVpcRequest",
- "DeleteVPNConnection": "DeleteVpnConnection",
- "DeleteVPNConnectionPages": "DeleteVpnConnectionPages",
- "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest",
- "DeleteVPNConnectionRoute": "DeleteVpnConnectionRoute",
- "DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages",
- "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest",
- "DeleteVPNGateway": "DeleteVpnGateway",
- "DeleteVPNGatewayPages": "DeleteVpnGatewayPages",
- "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest",
- "DescribeDHCPOptions": "DescribeDhcpOptions",
- "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages",
- "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest",
- "DescribeNetworkACLs": "DescribeNetworkAcls",
- "DescribeNetworkACLsPages": "DescribeNetworkAclsPages",
- "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest",
- "DescribeVPCAttribute": "DescribeVpcAttribute",
- "DescribeVPCAttributePages": "DescribeVpcAttributePages",
- "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest",
- "DescribeVPCClassicLink": "DescribeVpcClassicLink",
- "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages",
- "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest",
- "DescribeVPCEndpointServices": "DescribeVpcEndpointServices",
- "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages",
- "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest",
- "DescribeVPCEndpoints": "DescribeVpcEndpoints",
- "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages",
- "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest",
- "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections",
- "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages",
- "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest",
- "DescribeVPCs": "DescribeVpcs",
- "DescribeVPCsPages": "DescribeVpcsPages",
- "DescribeVPCsRequest": "DescribeVpcsRequest",
- "DescribeVPNConnections": "DescribeVpnConnections",
- "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages",
- "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest",
- "DescribeVPNGateways": "DescribeVpnGateways",
- "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages",
- "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest",
- "DetachClassicLinkVPC": "DetachClassicLinkVpc",
- "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages",
- "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest",
- "DetachVPNGateway": "DetachVpnGateway",
- "DetachVPNGatewayPages": "DetachVpnGatewayPages",
- "DetachVPNGatewayRequest": "DetachVpnGatewayRequest",
- "DisableVGWRoutePropagation": "DisableVgwRoutePropagation",
- "DisableVGWRoutePropagationPages": "DisableVgwRoutePropagationPages",
- "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest",
- "DisableVPCClassicLink": "DisableVpcClassicLink",
- "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages",
- "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest",
- "EnableVGWRoutePropagation": "EnableVgwRoutePropagation",
- "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages",
- "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest",
- "EnableVPCClassicLink": "EnableVpcClassicLink",
- "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages",
- "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest",
- "ModifyVPCAttribute": "ModifyVpcAttribute",
- "ModifyVPCAttributePages": "ModifyVpcAttributePages",
- "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest",
- "ModifyVPCEndpoint": "ModifyVpcEndpoint",
- "ModifyVPCEndpointPages": "ModifyVpcEndpointPages",
- "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest",
- "MoveAddressToVPC": "MoveAddressToVpc",
- "MoveAddressToVPCPages": "MoveAddressToVpcPages",
- "MoveAddressToVPCRequest": "MoveAddressToVpcRequest",
- "RejectVPCPeeringConnection": "RejectVpcPeeringConnection",
- "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages",
- "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest",
- "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation",
- "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages",
- "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest",
- "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry",
- "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages",
- "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest",
- "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses",
- "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages",
- "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest",
- },
- shapes: map[string]string{
- "AcceptVPCPeeringConnectionInput": "AcceptVpcPeeringConnectionInput",
- "AcceptVPCPeeringConnectionOutput": "AcceptVpcPeeringConnectionOutput",
- "AssignPrivateIPAddressesInput": "AssignPrivateIpAddressesInput",
- "AssignPrivateIPAddressesOutput": "AssignPrivateIpAddressesOutput",
- "AssociateDHCPOptionsInput": "AssociateDhcpOptionsInput",
- "AssociateDHCPOptionsOutput": "AssociateDhcpOptionsOutput",
- "AttachClassicLinkVPCInput": "AttachClassicLinkVpcInput",
- "AttachClassicLinkVPCOutput": "AttachClassicLinkVpcOutput",
- "AttachVPNGatewayInput": "AttachVpnGatewayInput",
- "AttachVPNGatewayOutput": "AttachVpnGatewayOutput",
- "CreateDHCPOptionsInput": "CreateDhcpOptionsInput",
- "CreateDHCPOptionsOutput": "CreateDhcpOptionsOutput",
- "CreateNetworkACLEntryInput": "CreateNetworkAclEntryInput",
- "CreateNetworkACLEntryOutput": "CreateNetworkAclEntryOutput",
- "CreateNetworkACLInput": "CreateNetworkAclInput",
- "CreateNetworkACLOutput": "CreateNetworkAclOutput",
- "CreateVPCEndpointInput": "CreateVpcEndpointInput",
- "CreateVPCEndpointOutput": "CreateVpcEndpointOutput",
- "CreateVPCInput": "CreateVpcInput",
- "CreateVPCOutput": "CreateVpcOutput",
- "CreateVPCPeeringConnectionInput": "CreateVpcPeeringConnectionInput",
- "CreateVPCPeeringConnectionOutput": "CreateVpcPeeringConnectionOutput",
- "CreateVPNConnectionInput": "CreateVpnConnectionInput",
- "CreateVPNConnectionOutput": "CreateVpnConnectionOutput",
- "CreateVPNConnectionRouteInput": "CreateVpnConnectionRouteInput",
- "CreateVPNConnectionRouteOutput": "CreateVpnConnectionRouteOutput",
- "CreateVPNGatewayInput": "CreateVpnGatewayInput",
- "CreateVPNGatewayOutput": "CreateVpnGatewayOutput",
- "DHCPConfiguration": "DhcpConfiguration",
- "DHCPOptions": "DhcpOptions",
- "DeleteDHCPOptionsInput": "DeleteDhcpOptionsInput",
- "DeleteDHCPOptionsOutput": "DeleteDhcpOptionsOutput",
- "DeleteNetworkACLEntryInput": "DeleteNetworkAclEntryInput",
- "DeleteNetworkACLEntryOutput": "DeleteNetworkAclEntryOutput",
- "DeleteNetworkACLInput": "DeleteNetworkAclInput",
- "DeleteNetworkACLOutput": "DeleteNetworkAclOutput",
- "DeleteVPCEndpointsInput": "DeleteVpcEndpointsInput",
- "DeleteVPCEndpointsOutput": "DeleteVpcEndpointsOutput",
- "DeleteVPCInput": "DeleteVpcInput",
- "DeleteVPCOutput": "DeleteVpcOutput",
- "DeleteVPCPeeringConnectionInput": "DeleteVpcPeeringConnectionInput",
- "DeleteVPCPeeringConnectionOutput": "DeleteVpcPeeringConnectionOutput",
- "DeleteVPNConnectionInput": "DeleteVpnConnectionInput",
- "DeleteVPNConnectionOutput": "DeleteVpnConnectionOutput",
- "DeleteVPNConnectionRouteInput": "DeleteVpnConnectionRouteInput",
- "DeleteVPNConnectionRouteOutput": "DeleteVpnConnectionRouteOutput",
- "DeleteVPNGatewayInput": "DeleteVpnGatewayInput",
- "DeleteVPNGatewayOutput": "DeleteVpnGatewayOutput",
- "DescribeDHCPOptionsInput": "DescribeDhcpOptionsInput",
- "DescribeDHCPOptionsOutput": "DescribeDhcpOptionsOutput",
- "DescribeNetworkACLsInput": "DescribeNetworkAclsInput",
- "DescribeNetworkACLsOutput": "DescribeNetworkAclsOutput",
- "DescribeVPCAttributeInput": "DescribeVpcAttributeInput",
- "DescribeVPCAttributeOutput": "DescribeVpcAttributeOutput",
- "DescribeVPCClassicLinkInput": "DescribeVpcClassicLinkInput",
- "DescribeVPCClassicLinkOutput": "DescribeVpcClassicLinkOutput",
- "DescribeVPCEndpointServicesInput": "DescribeVpcEndpointServicesInput",
- "DescribeVPCEndpointServicesOutput": "DescribeVpcEndpointServicesOutput",
- "DescribeVPCEndpointsInput": "DescribeVpcEndpointsInput",
- "DescribeVPCEndpointsOutput": "DescribeVpcEndpointsOutput",
- "DescribeVPCPeeringConnectionsInput": "DescribeVpcPeeringConnectionsInput",
- "DescribeVPCPeeringConnectionsOutput": "DescribeVpcPeeringConnectionsOutput",
- "DescribeVPCsInput": "DescribeVpcsInput",
- "DescribeVPCsOutput": "DescribeVpcsOutput",
- "DescribeVPNConnectionsInput": "DescribeVpnConnectionsInput",
- "DescribeVPNConnectionsOutput": "DescribeVpnConnectionsOutput",
- "DescribeVPNGatewaysInput": "DescribeVpnGatewaysInput",
- "DescribeVPNGatewaysOutput": "DescribeVpnGatewaysOutput",
- "DetachClassicLinkVPCInput": "DetachClassicLinkVpcInput",
- "DetachClassicLinkVPCOutput": "DetachClassicLinkVpcOutput",
- "DetachVPNGatewayInput": "DetachVpnGatewayInput",
- "DetachVPNGatewayOutput": "DetachVpnGatewayOutput",
- "DisableVGWRoutePropagationInput": "DisableVgwRoutePropagationInput",
- "DisableVGWRoutePropagationOutput": "DisableVgwRoutePropagationOutput",
- "DisableVPCClassicLinkInput": "DisableVpcClassicLinkInput",
- "DisableVPCClassicLinkOutput": "DisableVpcClassicLinkOutput",
- "EBSBlockDevice": "EbsBlockDevice",
- "EBSInstanceBlockDevice": "EbsInstanceBlockDevice",
- "EBSInstanceBlockDeviceSpecification": "EbsInstanceBlockDeviceSpecification",
- "EnableVGWRoutePropagationInput": "EnableVgwRoutePropagationInput",
- "EnableVGWRoutePropagationOutput": "EnableVgwRoutePropagationOutput",
- "EnableVPCClassicLinkInput": "EnableVpcClassicLinkInput",
- "EnableVPCClassicLinkOutput": "EnableVpcClassicLinkOutput",
- "IAMInstanceProfile": "IamInstanceProfile",
- "IAMInstanceProfileSpecification": "IamInstanceProfileSpecification",
- "ICMPTypeCode": "IcmpTypeCode",
- "IPPermission": "IpPermission",
- "IPRange": "IpRange",
- "InstancePrivateIPAddress": "InstancePrivateIpAddress",
- "ModifyVPCAttributeInput": "ModifyVpcAttributeInput",
- "ModifyVPCAttributeOutput": "ModifyVpcAttributeOutput",
- "ModifyVPCEndpointInput": "ModifyVpcEndpointInput",
- "ModifyVPCEndpointOutput": "ModifyVpcEndpointOutput",
- "MoveAddressToVPCInput": "MoveAddressToVpcInput",
- "MoveAddressToVPCOutput": "MoveAddressToVpcOutput",
- "NetworkACL": "NetworkAcl",
- "NetworkACLAssociation": "NetworkAclAssociation",
- "NetworkACLEntry": "NetworkAclEntry",
- "NetworkInterfacePrivateIPAddress": "NetworkInterfacePrivateIpAddress",
- "NewDHCPConfiguration": "NewDhcpConfiguration",
- "PrefixListID": "PrefixListId",
- "PrivateIPAddressSpecification": "PrivateIpAddressSpecification",
- "PropagatingVGW": "PropagatingVgw",
- "RejectVPCPeeringConnectionInput": "RejectVpcPeeringConnectionInput",
- "RejectVPCPeeringConnectionOutput": "RejectVpcPeeringConnectionOutput",
- "ReplaceNetworkACLAssociationInput": "ReplaceNetworkAclAssociationInput",
- "ReplaceNetworkACLAssociationOutput": "ReplaceNetworkAclAssociationOutput",
- "ReplaceNetworkACLEntryInput": "ReplaceNetworkAclEntryInput",
- "ReplaceNetworkACLEntryOutput": "ReplaceNetworkAclEntryOutput",
- "ReservedInstancesID": "ReservedInstancesId",
- "UnassignPrivateIPAddressesInput": "UnassignPrivateIpAddressesInput",
- "UnassignPrivateIPAddressesOutput": "UnassignPrivateIpAddressesOutput",
- "UserIDGroupPair": "UserIdGroupPair",
- "VGWTelemetry": "VgwTelemetry",
- "VPC": "Vpc",
- "VPCAttachment": "VpcAttachment",
- "VPCClassicLink": "VpcClassicLink",
- "VPCEndpoint": "VpcEndpoint",
- "VPCPeeringConnection": "VpcPeeringConnection",
- "VPCPeeringConnectionStateReason": "VpcPeeringConnectionStateReason",
- "VPCPeeringConnectionVPCInfo": "VpcPeeringConnectionVpcInfo",
- "VPNConnection": "VpnConnection",
- "VPNConnectionOptions": "VpnConnectionOptions",
- "VPNConnectionOptionsSpecification": "VpnConnectionOptionsSpecification",
- "VPNGateway": "VpnGateway",
- "VPNStaticRoute": "VpnStaticRoute",
- },
- fields: map[string]string{
- "AMILaunchIndex": "AmiLaunchIndex",
- "ARN": "Arn",
- "AWSAccessKeyID": "AWSAccessKeyId",
- "AccepterVPCInfo": "AccepterVpcInfo",
- "AddRouteTableIDs": "AddRouteTableIds",
- "AllocationID": "AllocationId",
- "AllocationIDs": "AllocationIds",
- "AssociatePublicIPAddress": "AssociatePublicIpAddress",
- "AssociationID": "AssociationId",
- "AttachmentID": "AttachmentId",
- "AvailableIPAddressCount": "AvailableIpAddressCount",
- "BGPASN": "BgpAsn",
- "BundleID": "BundleId",
- "BundleIDs": "BundleIds",
- "CIDRBlock": "CidrBlock",
- "CIDRIP": "CidrIp",
- "CIDRs": "Cidrs",
- "ConversionTaskID": "ConversionTaskId",
- "ConversionTaskIDs": "ConversionTaskIds",
- "CustomerGatewayID": "CustomerGatewayId",
- "CustomerGatewayIDs": "CustomerGatewayIds",
- "DHCPConfigurations": "DhcpConfigurations",
- "DHCPOptions": "DhcpOptions",
- "DHCPOptionsID": "DhcpOptionsId",
- "DHCPOptionsIDs": "DhcpOptionsIds",
- "DefaultForAZ": "DefaultForAz",
- "DeliverLogsPermissionARN": "DeliverLogsPermissionArn",
- "DestinationCIDRBlock": "DestinationCidrBlock",
- "DestinationPrefixListID": "DestinationPrefixListId",
- "DisableAPITermination": "DisableApiTermination",
- "EBS": "Ebs",
- "EBSOptimized": "EbsOptimized",
- "EnableDNSHostnames": "EnableDnsHostnames",
- "EnableDNSSupport": "EnableDnsSupport",
- "EventID": "EventId",
- "ExportTaskID": "ExportTaskId",
- "ExportTaskIDs": "ExportTaskIds",
- "FlowLogID": "FlowLogId",
- "FlowLogIDs": "FlowLogIds",
- "GatewayID": "GatewayId",
- "GroupID": "GroupId",
- "GroupIDs": "GroupIds",
- "IAMFleetRole": "IamFleetRole",
- "IAMInstanceProfile": "IamInstanceProfile",
- "ICMPTypeCode": "IcmpTypeCode",
- "ID": "Id",
- "IOPS": "Iops",
- "IPAddress": "IpAddress",
- "IPOwnerID": "IpOwnerId",
- "IPPermissions": "IpPermissions",
- "IPPermissionsEgress": "IpPermissionsEgress",
- "IPProtocol": "IpProtocol",
- "IPRanges": "IpRanges",
- "ImageID": "ImageId",
- "ImageIDs": "ImageIds",
- "ImportManifestURL": "ImportManifestUrl",
- "ImportTaskID": "ImportTaskId",
- "ImportTaskIDs": "ImportTaskIds",
- "InstanceID": "InstanceId",
- "InstanceIDs": "InstanceIds",
- "InstanceOwnerID": "InstanceOwnerId",
- "InternetGatewayID": "InternetGatewayId",
- "InternetGatewayIDs": "InternetGatewayIds",
- "KMSKeyID": "KmsKeyId",
- "KernelID": "KernelId",
- "MACAddress": "MacAddress",
- "MapPublicIPOnLaunch": "MapPublicIpOnLaunch",
- "NetworkACL": "NetworkAcl",
- "NetworkACLAssociationID": "NetworkAclAssociationId",
- "NetworkACLID": "NetworkAclId",
- "NetworkACLIDs": "NetworkAclIds",
- "NetworkACLs": "NetworkAcls",
- "NetworkInterfaceID": "NetworkInterfaceId",
- "NetworkInterfaceIDs": "NetworkInterfaceIds",
- "NetworkInterfaceOwnerID": "NetworkInterfaceOwnerId",
- "NewAssociationID": "NewAssociationId",
- "OutsideIPAddress": "OutsideIpAddress",
- "OwnerID": "OwnerId",
- "OwnerIDs": "OwnerIds",
- "PeerOwnerID": "PeerOwnerId",
- "PeerVPCID": "PeerVpcId",
- "PrefixListID": "PrefixListId",
- "PrefixListIDs": "PrefixListIds",
- "PresignedURL": "PresignedUrl",
- "PrivateDNSName": "PrivateDnsName",
- "PrivateIPAddress": "PrivateIpAddress",
- "PrivateIPAddresses": "PrivateIpAddresses",
- "ProductCodeID": "ProductCodeId",
- "PropagatingVGWs": "PropagatingVgws",
- "PublicDNSName": "PublicDnsName",
- "PublicIP": "PublicIp",
- "PublicIPAddress": "PublicIpAddress",
- "PublicIPs": "PublicIps",
- "RAMDisk": "Ramdisk",
- "RAMDiskID": "RamdiskId",
- "RemoveRouteTableIDs": "RemoveRouteTableIds",
- "RequesterID": "RequesterId",
- "RequesterVPCInfo": "RequesterVpcInfo",
- "ReservationID": "ReservationId",
- "ReservedInstancesID": "ReservedInstancesId",
- "ReservedInstancesIDs": "ReservedInstancesIds",
- "ReservedInstancesListingID": "ReservedInstancesListingId",
- "ReservedInstancesModificationID": "ReservedInstancesModificationId",
- "ReservedInstancesModificationIDs": "ReservedInstancesModificationIds",
- "ReservedInstancesOfferingID": "ReservedInstancesOfferingId",
- "ReservedInstancesOfferingIDs": "ReservedInstancesOfferingIds",
- "ResourceID": "ResourceId",
- "ResourceIDs": "ResourceIds",
- "RestorableByUserIDs": "RestorableByUserIds",
- "RouteTableAssociationID": "RouteTableAssociationId",
- "RouteTableID": "RouteTableId",
- "RouteTableIDs": "RouteTableIds",
- "SRIOVNetSupport": "SriovNetSupport",
- "SecondaryPrivateIPAddressCount": "SecondaryPrivateIpAddressCount",
- "SecurityGroupIDs": "SecurityGroupIds",
- "SnapshotID": "SnapshotId",
- "SnapshotIDs": "SnapshotIds",
- "SourceImageID": "SourceImageId",
- "SourceSecurityGroupOwnerID": "SourceSecurityGroupOwnerId",
- "SourceSnapshotID": "SourceSnapshotId",
- "SpotFleetRequestID": "SpotFleetRequestId",
- "SpotFleetRequestIDs": "SpotFleetRequestIds",
- "SpotInstanceRequestID": "SpotInstanceRequestId",
- "SpotInstanceRequestIDs": "SpotInstanceRequestIds",
- "SubnetID": "SubnetId",
- "SubnetIDs": "SubnetIds",
- "URL": "Url",
- "UserID": "UserId",
- "UserIDGroupPairs": "UserIdGroupPairs",
- "UserIDs": "UserIds",
- "VGWTelemetry": "VgwTelemetry",
- "VPC": "Vpc",
- "VPCAttachment": "VpcAttachment",
- "VPCAttachments": "VpcAttachments",
- "VPCEndpoint": "VpcEndpoint",
- "VPCEndpointID": "VpcEndpointId",
- "VPCEndpointIDs": "VpcEndpointIds",
- "VPCEndpoints": "VpcEndpoints",
- "VPCID": "VpcId",
- "VPCIDs": "VpcIds",
- "VPCPeeringConnection": "VpcPeeringConnection",
- "VPCPeeringConnectionID": "VpcPeeringConnectionId",
- "VPCPeeringConnectionIDs": "VpcPeeringConnectionIds",
- "VPCPeeringConnections": "VpcPeeringConnections",
- "VPCs": "Vpcs",
- "VPNConnection": "VpnConnection",
- "VPNConnectionID": "VpnConnectionId",
- "VPNConnectionIDs": "VpnConnectionIds",
- "VPNConnections": "VpnConnections",
- "VPNGateway": "VpnGateway",
- "VPNGatewayID": "VpnGatewayId",
- "VPNGatewayIDs": "VpnGatewayIds",
- "VPNGateways": "VpnGateways",
- "VolumeID": "VolumeId",
- "VolumeIDs": "VolumeIds",
- },
- },
- "github.com/aws/aws-sdk-go/service/ec2/ec2iface": {
- operations: map[string]string{
- "AcceptVPCPeeringConnection": "AcceptVpcPeeringConnection",
- "AcceptVPCPeeringConnectionPages": "AcceptVpcPeeringConnectionPages",
- "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest",
- "AssignPrivateIPAddresses": "AssignPrivateIpAddresses",
- "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages",
- "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest",
- "AssociateDHCPOptions": "AssociateDhcpOptions",
- "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages",
- "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest",
- "AttachClassicLinkVPC": "AttachClassicLinkVpc",
- "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages",
- "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest",
- "AttachVPNGateway": "AttachVpnGateway",
- "AttachVPNGatewayPages": "AttachVpnGatewayPages",
- "AttachVPNGatewayRequest": "AttachVpnGatewayRequest",
- "CreateDHCPOptions": "CreateDhcpOptions",
- "CreateDHCPOptionsPages": "CreateDhcpOptionsPages",
- "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest",
- "CreateNetworkACL": "CreateNetworkAcl",
- "CreateNetworkACLEntry": "CreateNetworkAclEntry",
- "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages",
- "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest",
- "CreateNetworkACLPages": "CreateNetworkAclPages",
- "CreateNetworkACLRequest": "CreateNetworkAclRequest",
- "CreateVPC": "CreateVpc",
- "CreateVPCEndpoint": "CreateVpcEndpoint",
- "CreateVPCEndpointPages": "CreateVpcEndpointPages",
- "CreateVPCEndpointRequest": "CreateVpcEndpointRequest",
- "CreateVPCPages": "CreateVpcPages",
- "CreateVPCPeeringConnection": "CreateVpcPeeringConnection",
- "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages",
- "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest",
- "CreateVPCRequest": "CreateVpcRequest",
- "CreateVPNConnection": "CreateVpnConnection",
- "CreateVPNConnectionPages": "CreateVpnConnectionPages",
- "CreateVPNConnectionRequest": "CreateVpnConnectionRequest",
- "CreateVPNConnectionRoute": "CreateVpnConnectionRoute",
- "CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages",
- "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest",
- "CreateVPNGateway": "CreateVpnGateway",
- "CreateVPNGatewayPages": "CreateVpnGatewayPages",
- "CreateVPNGatewayRequest": "CreateVpnGatewayRequest",
- "DeleteDHCPOptions": "DeleteDhcpOptions",
- "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages",
- "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest",
- "DeleteNetworkACL": "DeleteNetworkAcl",
- "DeleteNetworkACLEntry": "DeleteNetworkAclEntry",
- "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages",
- "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest",
- "DeleteNetworkACLPages": "DeleteNetworkAclPages",
- "DeleteNetworkACLRequest": "DeleteNetworkAclRequest",
- "DeleteVPC": "DeleteVpc",
- "DeleteVPCEndpoints": "DeleteVpcEndpoints",
- "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages",
- "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest",
- "DeleteVPCPages": "DeleteVpcPages",
- "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection",
- "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages",
- "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest",
- "DeleteVPCRequest": "DeleteVpcRequest",
- "DeleteVPNConnection": "DeleteVpnConnection",
- "DeleteVPNConnectionPages": "DeleteVpnConnectionPages",
- "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest",
- "DeleteVPNConnectionRoute": "DeleteVpnConnectionRoute",
- "DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages",
- "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest",
- "DeleteVPNGateway": "DeleteVpnGateway",
- "DeleteVPNGatewayPages": "DeleteVpnGatewayPages",
- "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest",
- "DescribeDHCPOptions": "DescribeDhcpOptions",
- "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages",
- "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest",
- "DescribeNetworkACLs": "DescribeNetworkAcls",
- "DescribeNetworkACLsPages": "DescribeNetworkAclsPages",
- "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest",
- "DescribeVPCAttribute": "DescribeVpcAttribute",
- "DescribeVPCAttributePages": "DescribeVpcAttributePages",
- "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest",
- "DescribeVPCClassicLink": "DescribeVpcClassicLink",
- "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages",
- "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest",
- "DescribeVPCEndpointServices": "DescribeVpcEndpointServices",
- "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages",
- "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest",
- "DescribeVPCEndpoints": "DescribeVpcEndpoints",
- "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages",
- "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest",
- "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections",
- "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages",
- "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest",
- "DescribeVPCs": "DescribeVpcs",
- "DescribeVPCsPages": "DescribeVpcsPages",
- "DescribeVPCsRequest": "DescribeVpcsRequest",
- "DescribeVPNConnections": "DescribeVpnConnections",
- "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages",
- "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest",
- "DescribeVPNGateways": "DescribeVpnGateways",
- "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages",
- "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest",
- "DetachClassicLinkVPC": "DetachClassicLinkVpc",
- "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages",
- "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest",
- "DetachVPNGateway": "DetachVpnGateway",
- "DetachVPNGatewayPages": "DetachVpnGatewayPages",
- "DetachVPNGatewayRequest": "DetachVpnGatewayRequest",
- "DisableVGWRoutePropagation": "DisableVgwRoutePropagation",
- "DisableVGWRoutePropagationPages": "DisableVgwRoutePropagationPages",
- "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest",
- "DisableVPCClassicLink": "DisableVpcClassicLink",
- "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages",
- "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest",
- "EnableVGWRoutePropagation": "EnableVgwRoutePropagation",
- "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages",
- "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest",
- "EnableVPCClassicLink": "EnableVpcClassicLink",
- "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages",
- "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest",
- "ModifyVPCAttribute": "ModifyVpcAttribute",
- "ModifyVPCAttributePages": "ModifyVpcAttributePages",
- "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest",
- "ModifyVPCEndpoint": "ModifyVpcEndpoint",
- "ModifyVPCEndpointPages": "ModifyVpcEndpointPages",
- "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest",
- "MoveAddressToVPC": "MoveAddressToVpc",
- "MoveAddressToVPCPages": "MoveAddressToVpcPages",
- "MoveAddressToVPCRequest": "MoveAddressToVpcRequest",
- "RejectVPCPeeringConnection": "RejectVpcPeeringConnection",
- "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages",
- "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest",
- "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation",
- "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages",
- "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest",
- "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry",
- "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages",
- "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest",
- "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses",
- "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages",
- "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/ecs": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "CPU": "Cpu",
- "ClusterARN": "ClusterArn",
- "ClusterARNs": "ClusterArns",
- "ContainerARN": "ContainerArn",
- "ContainerInstanceARN": "ContainerInstanceArn",
- "ContainerInstanceARNs": "ContainerInstanceArns",
- "EC2InstanceID": "Ec2InstanceId",
- "ID": "Id",
- "RoleARN": "RoleArn",
- "ServiceARN": "ServiceArn",
- "ServiceARNs": "ServiceArns",
- "TaskARN": "TaskArn",
- "TaskARNs": "TaskArns",
- "TaskDefinitionARN": "TaskDefinitionArn",
- "TaskDefinitionARNs": "TaskDefinitionArns",
- },
- },
- "github.com/aws/aws-sdk-go/service/ecs/ecsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/efs": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "FileSystemID": "FileSystemId",
- "IPAddress": "IpAddress",
- "MountTargetID": "MountTargetId",
- "NetworkInterfaceID": "NetworkInterfaceId",
- "OwnerID": "OwnerId",
- "SubnetID": "SubnetId",
- },
- },
- "github.com/aws/aws-sdk-go/service/efs/efsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/elasticache": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "CacheClusterID": "CacheClusterId",
- "CacheNodeID": "CacheNodeId",
- "CacheNodeIDsToReboot": "CacheNodeIdsToReboot",
- "CacheNodeIDsToRemove": "CacheNodeIdsToRemove",
- "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId",
- "NodeGroupID": "NodeGroupId",
- "NotificationTopicARN": "NotificationTopicArn",
- "OwnerID": "OwnerId",
- "PrimaryClusterID": "PrimaryClusterId",
- "ReplicationGroupID": "ReplicationGroupId",
- "ReservedCacheNodeID": "ReservedCacheNodeId",
- "ReservedCacheNodesOfferingID": "ReservedCacheNodesOfferingId",
- "SecurityGroupID": "SecurityGroupId",
- "SecurityGroupIDs": "SecurityGroupIds",
- "SnapshotARNs": "SnapshotArns",
- "SnapshottingClusterID": "SnapshottingClusterId",
- "SourceCacheNodeID": "SourceCacheNodeId",
- "SubnetIDs": "SubnetIds",
- "TopicARN": "TopicArn",
- "VPCID": "VpcId",
- },
- },
- "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/elasticbeanstalk": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "DestinationEnvironmentID": "DestinationEnvironmentId",
- "EC2InstanceID": "Ec2InstanceId",
- "EnvironmentID": "EnvironmentId",
- "EnvironmentIDs": "EnvironmentIds",
- "ID": "Id",
- "OK": "Ok",
- "RequestID": "RequestId",
- "SourceEnvironmentID": "SourceEnvironmentId",
- },
- },
- "github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/elastictranscoder": {
- operations: map[string]string{},
- shapes: map[string]string{
- "HLSContentProtection": "HlsContentProtection",
- "PlayReadyDRM": "PlayReadyDrm",
- },
- fields: map[string]string{
- "ARN": "Arn",
- "AWSKMSKeyARN": "AwsKmsKeyArn",
- "HLSContentProtection": "HlsContentProtection",
- "ID": "Id",
- "KeyID": "KeyId",
- "KeyMD5": "KeyMd5",
- "LicenseAcquisitionURL": "LicenseAcquisitionUrl",
- "PipelineID": "PipelineId",
- "PlayReadyDRM": "PlayReadyDrm",
- "PresetID": "PresetId",
- "PresetWatermarkID": "PresetWatermarkId",
- },
- },
- "github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/elb": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "InstanceID": "InstanceId",
- "SSLCertificateID": "SSLCertificateId",
- "VPCID": "VPCId",
- },
- },
- "github.com/aws/aws-sdk-go/service/elb/elbiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/emr": {
- operations: map[string]string{},
- shapes: map[string]string{
- "EC2InstanceAttributes": "Ec2InstanceAttributes",
- "HadoopJARStepConfig": "HadoopJarStepConfig",
- },
- fields: map[string]string{
- "AMIVersion": "AmiVersion",
- "ClusterID": "ClusterId",
- "EC2AvailabilityZone": "Ec2AvailabilityZone",
- "EC2InstanceAttributes": "Ec2InstanceAttributes",
- "EC2InstanceID": "Ec2InstanceId",
- "EC2InstanceIDsToTerminate": "EC2InstanceIdsToTerminate",
- "EC2KeyName": "Ec2KeyName",
- "EC2SubnetID": "Ec2SubnetId",
- "EMRManagedMasterSecurityGroup": "EmrManagedMasterSecurityGroup",
- "EMRManagedSlaveSecurityGroup": "EmrManagedSlaveSecurityGroup",
- "HadoopJARStep": "HadoopJarStep",
- "IAMInstanceProfile": "IamInstanceProfile",
- "ID": "Id",
- "InstanceGroupID": "InstanceGroupId",
- "InstanceGroupIDs": "InstanceGroupIds",
- "JAR": "Jar",
- "JobFlowID": "JobFlowId",
- "JobFlowIDs": "JobFlowIds",
- "LogURI": "LogUri",
- "MasterInstanceID": "MasterInstanceId",
- "MasterPublicDNSName": "MasterPublicDnsName",
- "PrivateDNSName": "PrivateDnsName",
- "PrivateIPAddress": "PrivateIpAddress",
- "PublicDNSName": "PublicDnsName",
- "PublicIPAddress": "PublicIpAddress",
- "RequestedAMIVersion": "RequestedAmiVersion",
- "ResourceID": "ResourceId",
- "RunningAMIVersion": "RunningAmiVersion",
- "StepID": "StepId",
- "StepIDs": "StepIds",
- },
- },
- "github.com/aws/aws-sdk-go/service/emr/emriface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/glacier": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AccountID": "AccountId",
- "ArchiveID": "ArchiveId",
- "JobID": "JobId",
- "LockID": "LockId",
- "MultipartUploadID": "MultipartUploadId",
- "UploadID": "UploadId",
- },
- },
- "github.com/aws/aws-sdk-go/service/glacier/glacieriface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/iam": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "AccessKeyID": "AccessKeyId",
- "CertificateID": "CertificateId",
- "DefaultVersionID": "DefaultVersionId",
- "GroupID": "GroupId",
- "InstanceProfileID": "InstanceProfileId",
- "OpenIDConnectProviderARN": "OpenIDConnectProviderArn",
- "PolicyARN": "PolicyArn",
- "PolicyID": "PolicyId",
- "RoleID": "RoleId",
- "SAMLProviderARN": "SAMLProviderArn",
- "SSHPublicKeyID": "SSHPublicKeyId",
- "ServerCertificateID": "ServerCertificateId",
- "URL": "Url",
- "UserID": "UserId",
- "VersionID": "VersionId",
- },
- },
- "github.com/aws/aws-sdk-go/service/iam/iamiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/kinesis": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AdjacentParentShardID": "AdjacentParentShardId",
- "ExclusiveStartShardID": "ExclusiveStartShardId",
- "ParentShardID": "ParentShardId",
- "ShardID": "ShardId",
- },
- },
- "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/kms": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "AWSAccountID": "AWSAccountId",
- "AliasARN": "AliasArn",
- "DestinationKeyID": "DestinationKeyId",
- "GrantID": "GrantId",
- "KeyARN": "KeyArn",
- "KeyID": "KeyId",
- "SourceKeyID": "SourceKeyId",
- "TargetKeyID": "TargetKeyId",
- },
- },
- "github.com/aws/aws-sdk-go/service/kms/kmsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/lambda": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "EventSourceARN": "EventSourceArn",
- "FunctionARN": "FunctionArn",
- "SourceARN": "SourceArn",
- "StatementID": "StatementId",
- },
- },
- "github.com/aws/aws-sdk-go/service/lambda/lambdaiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/machinelearning": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "BatchPredictionDataSourceID": "BatchPredictionDataSourceId",
- "BatchPredictionID": "BatchPredictionId",
- "CreatedByIAMUser": "CreatedByIamUser",
- "DataPipelineID": "DataPipelineId",
- "DataSchemaURI": "DataSchemaUri",
- "DataSourceID": "DataSourceId",
- "EndpointURL": "EndpointUrl",
- "EvaluationDataSourceID": "EvaluationDataSourceId",
- "EvaluationID": "EvaluationId",
- "LogURI": "LogUri",
- "MLModelID": "MLModelId",
- "OutputURI": "OutputUri",
- "RecipeURI": "RecipeUri",
- "SecurityGroupIDs": "SecurityGroupIds",
- "SelectSQLQuery": "SelectSqlQuery",
- "SubnetID": "SubnetId",
- "TrainingDataSourceID": "TrainingDataSourceId",
- },
- },
- "github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/mobileanalytics": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ID": "Id",
- },
- },
- "github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/opsworks": {
- operations: map[string]string{
- "AssociateElasticIP": "AssociateElasticIp",
- "AssociateElasticIPPages": "AssociateElasticIpPages",
- "AssociateElasticIPRequest": "AssociateElasticIpRequest",
- "DeregisterElasticIP": "DeregisterElasticIp",
- "DeregisterElasticIPPages": "DeregisterElasticIpPages",
- "DeregisterElasticIPRequest": "DeregisterElasticIpRequest",
- "DeregisterRDSDBInstance": "DeregisterRdsDbInstance",
- "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages",
- "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest",
- "DescribeElasticIPs": "DescribeElasticIps",
- "DescribeElasticIPsPages": "DescribeElasticIpsPages",
- "DescribeElasticIPsRequest": "DescribeElasticIpsRequest",
- "DescribeRAIDArrays": "DescribeRaidArrays",
- "DescribeRAIDArraysPages": "DescribeRaidArraysPages",
- "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest",
- "DescribeRDSDBInstances": "DescribeRdsDbInstances",
- "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages",
- "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest",
- "DisassociateElasticIP": "DisassociateElasticIp",
- "DisassociateElasticIPPages": "DisassociateElasticIpPages",
- "DisassociateElasticIPRequest": "DisassociateElasticIpRequest",
- "RegisterElasticIP": "RegisterElasticIp",
- "RegisterElasticIPPages": "RegisterElasticIpPages",
- "RegisterElasticIPRequest": "RegisterElasticIpRequest",
- "RegisterRDSDBInstance": "RegisterRdsDbInstance",
- "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages",
- "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest",
- "UpdateElasticIP": "UpdateElasticIp",
- "UpdateElasticIPPages": "UpdateElasticIpPages",
- "UpdateElasticIPRequest": "UpdateElasticIpRequest",
- "UpdateRDSDBInstance": "UpdateRdsDbInstance",
- "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages",
- "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest",
- },
- shapes: map[string]string{
- "AssociateElasticIPInput": "AssociateElasticIpInput",
- "AssociateElasticIPOutput": "AssociateElasticIpOutput",
- "DeregisterElasticIPInput": "DeregisterElasticIpInput",
- "DeregisterElasticIPOutput": "DeregisterElasticIpOutput",
- "DeregisterRDSDBInstanceInput": "DeregisterRdsDbInstanceInput",
- "DeregisterRDSDBInstanceOutput": "DeregisterRdsDbInstanceOutput",
- "DescribeElasticIPsInput": "DescribeElasticIpsInput",
- "DescribeElasticIPsOutput": "DescribeElasticIpsOutput",
- "DescribeRAIDArraysInput": "DescribeRaidArraysInput",
- "DescribeRAIDArraysOutput": "DescribeRaidArraysOutput",
- "DescribeRDSDBInstancesInput": "DescribeRdsDbInstancesInput",
- "DescribeRDSDBInstancesOutput": "DescribeRdsDbInstancesOutput",
- "DisassociateElasticIPInput": "DisassociateElasticIpInput",
- "DisassociateElasticIPOutput": "DisassociateElasticIpOutput",
- "EBSBlockDevice": "EbsBlockDevice",
- "ElasticIP": "ElasticIp",
- "RAIDArray": "RaidArray",
- "RDSDBInstance": "RdsDbInstance",
- "RegisterElasticIPInput": "RegisterElasticIpInput",
- "RegisterElasticIPOutput": "RegisterElasticIpOutput",
- "RegisterRDSDBInstanceInput": "RegisterRdsDbInstanceInput",
- "RegisterRDSDBInstanceOutput": "RegisterRdsDbInstanceOutput",
- "SSLConfiguration": "SslConfiguration",
- "UpdateElasticIPInput": "UpdateElasticIpInput",
- "UpdateElasticIPOutput": "UpdateElasticIpOutput",
- "UpdateRDSDBInstanceInput": "UpdateRdsDbInstanceInput",
- "UpdateRDSDBInstanceOutput": "UpdateRdsDbInstanceOutput",
- },
- fields: map[string]string{
- "AMIID": "AmiId",
- "ARN": "Arn",
- "AgentInstallerURL": "AgentInstallerUrl",
- "AllowSSH": "AllowSsh",
- "AppID": "AppId",
- "AppIDs": "AppIds",
- "AutoAssignElasticIPs": "AutoAssignElasticIps",
- "AutoAssignPublicIPs": "AutoAssignPublicIps",
- "CPUThreshold": "CpuThreshold",
- "CloneAppIDs": "CloneAppIds",
- "CommandID": "CommandId",
- "CommandIDs": "CommandIds",
- "CustomInstanceProfileARN": "CustomInstanceProfileArn",
- "CustomJSON": "CustomJson",
- "CustomSecurityGroupIDs": "CustomSecurityGroupIds",
- "DBInstanceIdentifier": "DbInstanceIdentifier",
- "DBPassword": "DbPassword",
- "DBUser": "DbUser",
- "DNSName": "DnsName",
- "DefaultInstanceProfileARN": "DefaultInstanceProfileArn",
- "DefaultSSHKeyName": "DefaultSshKeyName",
- "DefaultSubnetID": "DefaultSubnetId",
- "DelayUntilELBConnectionsDrained": "DelayUntilElbConnectionsDrained",
- "DeleteElasticIP": "DeleteElasticIp",
- "DeploymentID": "DeploymentId",
- "DeploymentIDs": "DeploymentIds",
- "EBS": "Ebs",
- "EBSOptimized": "EbsOptimized",
- "EC2InstanceID": "Ec2InstanceId",
- "EC2InstanceIDs": "Ec2InstanceIds",
- "EC2VolumeID": "Ec2VolumeId",
- "EcsClusterARN": "EcsClusterArn",
- "EcsClusterARNs": "EcsClusterArns",
- "EcsContainerInstanceARN": "EcsContainerInstanceArn",
- "ElasticIP": "ElasticIp",
- "ElasticIPs": "ElasticIps",
- "EnableSSL": "EnableSsl",
- "IAMUserARN": "IamUserArn",
- "IAMUserARNs": "IamUserArns",
- "IOPS": "Iops",
- "IP": "Ip",
- "IPs": "Ips",
- "InstanceID": "InstanceId",
- "InstanceIDs": "InstanceIds",
- "InstanceProfileARN": "InstanceProfileArn",
- "LastServiceErrorID": "LastServiceErrorId",
- "LayerID": "LayerId",
- "LayerIDs": "LayerIds",
- "LogURL": "LogUrl",
- "MissingOnRDS": "MissingOnRds",
- "PrivateDNS": "PrivateDns",
- "PrivateIP": "PrivateIp",
- "PublicDNS": "PublicDns",
- "PublicIP": "PublicIp",
- "RAIDArrayID": "RaidArrayId",
- "RAIDArrayIDs": "RaidArrayIds",
- "RAIDArrays": "RaidArrays",
- "RAIDLevel": "RaidLevel",
- "RDSDBInstanceARN": "RdsDbInstanceArn",
- "RDSDBInstanceARNs": "RdsDbInstanceArns",
- "RDSDBInstances": "RdsDbInstances",
- "RSAPublicKey": "RsaPublicKey",
- "RSAPublicKeyFingerprint": "RsaPublicKeyFingerprint",
- "RootDeviceVolumeID": "RootDeviceVolumeId",
- "SSHHostDSAKeyFingerprint": "SshHostDsaKeyFingerprint",
- "SSHHostRSAKeyFingerprint": "SshHostRsaKeyFingerprint",
- "SSHKey": "SshKey",
- "SSHKeyName": "SshKeyName",
- "SSHPublicKey": "SshPublicKey",
- "SSHUsername": "SshUsername",
- "SSLConfiguration": "SslConfiguration",
- "SecurityGroupIDs": "SecurityGroupIds",
- "ServiceErrorID": "ServiceErrorId",
- "ServiceErrorIDs": "ServiceErrorIds",
- "ServiceRoleARN": "ServiceRoleArn",
- "SnapshotID": "SnapshotId",
- "SourceStackID": "SourceStackId",
- "StackID": "StackId",
- "StackIDs": "StackIds",
- "SubnetID": "SubnetId",
- "SubnetIDs": "SubnetIds",
- "URL": "Url",
- "UseEBSOptimizedInstances": "UseEbsOptimizedInstances",
- "UseOpsWorksSecurityGroups": "UseOpsworksSecurityGroups",
- "VPCID": "VpcId",
- "VolumeID": "VolumeId",
- "VolumeIDs": "VolumeIds",
- },
- },
- "github.com/aws/aws-sdk-go/service/opsworks/opsworksiface": {
- operations: map[string]string{
- "AssociateElasticIP": "AssociateElasticIp",
- "AssociateElasticIPPages": "AssociateElasticIpPages",
- "AssociateElasticIPRequest": "AssociateElasticIpRequest",
- "DeregisterElasticIP": "DeregisterElasticIp",
- "DeregisterElasticIPPages": "DeregisterElasticIpPages",
- "DeregisterElasticIPRequest": "DeregisterElasticIpRequest",
- "DeregisterRDSDBInstance": "DeregisterRdsDbInstance",
- "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages",
- "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest",
- "DescribeElasticIPs": "DescribeElasticIps",
- "DescribeElasticIPsPages": "DescribeElasticIpsPages",
- "DescribeElasticIPsRequest": "DescribeElasticIpsRequest",
- "DescribeRAIDArrays": "DescribeRaidArrays",
- "DescribeRAIDArraysPages": "DescribeRaidArraysPages",
- "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest",
- "DescribeRDSDBInstances": "DescribeRdsDbInstances",
- "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages",
- "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest",
- "DisassociateElasticIP": "DisassociateElasticIp",
- "DisassociateElasticIPPages": "DisassociateElasticIpPages",
- "DisassociateElasticIPRequest": "DisassociateElasticIpRequest",
- "RegisterElasticIP": "RegisterElasticIp",
- "RegisterElasticIPPages": "RegisterElasticIpPages",
- "RegisterElasticIPRequest": "RegisterElasticIpRequest",
- "RegisterRDSDBInstance": "RegisterRdsDbInstance",
- "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages",
- "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest",
- "UpdateElasticIP": "UpdateElasticIp",
- "UpdateElasticIPPages": "UpdateElasticIpPages",
- "UpdateElasticIPRequest": "UpdateElasticIpRequest",
- "UpdateRDSDBInstance": "UpdateRdsDbInstance",
- "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages",
- "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/rds": {
- operations: map[string]string{},
- shapes: map[string]string{
- "VPCSecurityGroupMembership": "VpcSecurityGroupMembership",
- },
- fields: map[string]string{
- "AllowsVPCAndNonVPCInstanceMemberships": "AllowsVpcAndNonVpcInstanceMemberships",
- "CustSubscriptionID": "CustSubscriptionId",
- "CustomerAWSID": "CustomerAwsId",
- "DBIResourceID": "DbiResourceId",
- "DBInstancePort": "DbInstancePort",
- "EC2SecurityGroupID": "EC2SecurityGroupId",
- "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId",
- "IOPS": "Iops",
- "KMSKeyID": "KmsKeyId",
- "OwnerID": "OwnerId",
- "ReservedDBInstanceID": "ReservedDBInstanceId",
- "ReservedDBInstancesOfferingID": "ReservedDBInstancesOfferingId",
- "SNSTopicARN": "SnsTopicArn",
- "SourceIDs": "SourceIds",
- "SourceIDsList": "SourceIdsList",
- "SubnetIDs": "SubnetIds",
- "SupportsIOPS": "SupportsIops",
- "TDECredentialARN": "TdeCredentialArn",
- "TDECredentialPassword": "TdeCredentialPassword",
- "VPC": "Vpc",
- "VPCID": "VpcId",
- "VPCSecurityGroupID": "VpcSecurityGroupId",
- "VPCSecurityGroupIDs": "VpcSecurityGroupIds",
- "VPCSecurityGroupMemberships": "VpcSecurityGroupMemberships",
- "VPCSecurityGroups": "VpcSecurityGroups",
- },
- },
- "github.com/aws/aws-sdk-go/service/rds/rdsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/redshift": {
- operations: map[string]string{
- "CreateHSMClientCertificate": "CreateHsmClientCertificate",
- "CreateHSMClientCertificatePages": "CreateHsmClientCertificatePages",
- "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest",
- "CreateHSMConfiguration": "CreateHsmConfiguration",
- "CreateHSMConfigurationPages": "CreateHsmConfigurationPages",
- "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest",
- "DeleteHSMClientCertificate": "DeleteHsmClientCertificate",
- "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages",
- "DeleteHSMClientCertificateRequest": "DeleteHsmClientCertificateRequest",
- "DeleteHSMConfiguration": "DeleteHsmConfiguration",
- "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages",
- "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest",
- "DescribeHSMClientCertificates": "DescribeHsmClientCertificates",
- "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages",
- "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest",
- "DescribeHSMConfigurations": "DescribeHsmConfigurations",
- "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages",
- "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest",
- },
- shapes: map[string]string{
- "CreateHSMClientCertificateInput": "CreateHsmClientCertificateInput",
- "CreateHSMClientCertificateOutput": "CreateHsmClientCertificateOutput",
- "CreateHSMConfigurationInput": "CreateHsmConfigurationInput",
- "CreateHSMConfigurationOutput": "CreateHsmConfigurationOutput",
- "DeleteHSMClientCertificateInput": "DeleteHsmClientCertificateInput",
- "DeleteHSMClientCertificateOutput": "DeleteHsmClientCertificateOutput",
- "DeleteHSMConfigurationInput": "DeleteHsmConfigurationInput",
- "DeleteHSMConfigurationOutput": "DeleteHsmConfigurationOutput",
- "DescribeHSMClientCertificatesInput": "DescribeHsmClientCertificatesInput",
- "DescribeHSMClientCertificatesOutput": "DescribeHsmClientCertificatesOutput",
- "DescribeHSMConfigurationsInput": "DescribeHsmConfigurationsInput",
- "DescribeHSMConfigurationsOutput": "DescribeHsmConfigurationsOutput",
- "ElasticIPStatus": "ElasticIpStatus",
- "HSMClientCertificate": "HsmClientCertificate",
- "HSMConfiguration": "HsmConfiguration",
- "HSMStatus": "HsmStatus",
- "VPCSecurityGroupMembership": "VpcSecurityGroupMembership",
- },
- fields: map[string]string{
- "AccountID": "AccountId",
- "CustSubscriptionID": "CustSubscriptionId",
- "CustomerAWSID": "CustomerAwsId",
- "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId",
- "ElasticIP": "ElasticIp",
- "ElasticIPStatus": "ElasticIpStatus",
- "EventID": "EventId",
- "HSMClientCertificate": "HsmClientCertificate",
- "HSMClientCertificateIdentifier": "HsmClientCertificateIdentifier",
- "HSMClientCertificatePublicKey": "HsmClientCertificatePublicKey",
- "HSMClientCertificates": "HsmClientCertificates",
- "HSMConfiguration": "HsmConfiguration",
- "HSMConfigurationIdentifier": "HsmConfigurationIdentifier",
- "HSMConfigurations": "HsmConfigurations",
- "HSMIPAddress": "HsmIpAddress",
- "HSMPartitionName": "HsmPartitionName",
- "HSMPartitionPassword": "HsmPartitionPassword",
- "HSMServerPublicCertificate": "HsmServerPublicCertificate",
- "HSMStatus": "HsmStatus",
- "KMSKeyID": "KmsKeyId",
- "ReservedNodeID": "ReservedNodeId",
- "ReservedNodeOfferingID": "ReservedNodeOfferingId",
- "SNSTopicARN": "SnsTopicArn",
- "SourceIDs": "SourceIds",
- "SourceIDsList": "SourceIdsList",
- "SubnetIDs": "SubnetIds",
- "VPCID": "VpcId",
- "VPCSecurityGroupID": "VpcSecurityGroupId",
- "VPCSecurityGroupIDs": "VpcSecurityGroupIds",
- "VPCSecurityGroups": "VpcSecurityGroups",
- },
- },
- "github.com/aws/aws-sdk-go/service/redshift/redshiftiface": {
- operations: map[string]string{
- "CreateHSMClientCertificate": "CreateHsmClientCertificate",
- "CreateHSMClientCertificatePages": "CreateHsmClientCertificatePages",
- "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest",
- "CreateHSMConfiguration": "CreateHsmConfiguration",
- "CreateHSMConfigurationPages": "CreateHsmConfigurationPages",
- "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest",
- "DeleteHSMClientCertificate": "DeleteHsmClientCertificate",
- "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages",
- "DeleteHSMClientCertificateRequest": "DeleteHsmClientCertificateRequest",
- "DeleteHSMConfiguration": "DeleteHsmConfiguration",
- "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages",
- "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest",
- "DescribeHSMClientCertificates": "DescribeHsmClientCertificates",
- "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages",
- "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest",
- "DescribeHSMConfigurations": "DescribeHsmConfigurations",
- "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages",
- "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/route53": {
- operations: map[string]string{
- "GetCheckerIPRanges": "GetCheckerIpRanges",
- "GetCheckerIPRangesPages": "GetCheckerIpRangesPages",
- "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest",
- },
- shapes: map[string]string{
- "GetCheckerIPRangesInput": "GetCheckerIpRangesInput",
- "GetCheckerIPRangesOutput": "GetCheckerIpRangesOutput",
- },
- fields: map[string]string{
- "CheckerIPRanges": "CheckerIpRanges",
- "DelegationSetID": "DelegationSetId",
- "HealthCheckID": "HealthCheckId",
- "HostedZoneID": "HostedZoneId",
- "ID": "Id",
- "NextHostedZoneID": "NextHostedZoneId",
- "ResourceID": "ResourceId",
- "ResourceIDs": "ResourceIds",
- "VPCID": "VPCId",
- },
- },
- "github.com/aws/aws-sdk-go/service/route53/route53iface": {
- operations: map[string]string{
- "GetCheckerIPRanges": "GetCheckerIpRanges",
- "GetCheckerIPRangesPages": "GetCheckerIpRangesPages",
- "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/route53domains": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "DNSSec": "DnsSec",
- "GlueIPs": "GlueIps",
- "IDNLangCode": "IdnLangCode",
- "OperationID": "OperationId",
- "RegistrarURL": "RegistrarUrl",
- "RegistryDomainID": "RegistryDomainId",
- },
- },
- "github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/s3": {
- operations: map[string]string{
- "DeleteBucketCORS": "DeleteBucketCors",
- "DeleteBucketCORSPages": "DeleteBucketCorsPages",
- "DeleteBucketCORSRequest": "DeleteBucketCorsRequest",
- "GetBucketACL": "GetBucketAcl",
- "GetBucketACLPages": "GetBucketAclPages",
- "GetBucketACLRequest": "GetBucketAclRequest",
- "GetBucketCORS": "GetBucketCors",
- "GetBucketCORSPages": "GetBucketCorsPages",
- "GetBucketCORSRequest": "GetBucketCorsRequest",
- "GetObjectACL": "GetObjectAcl",
- "GetObjectACLPages": "GetObjectAclPages",
- "GetObjectACLRequest": "GetObjectAclRequest",
- "PutBucketACL": "PutBucketAcl",
- "PutBucketACLPages": "PutBucketAclPages",
- "PutBucketACLRequest": "PutBucketAclRequest",
- "PutBucketCORS": "PutBucketCors",
- "PutBucketCORSPages": "PutBucketCorsPages",
- "PutBucketCORSRequest": "PutBucketCorsRequest",
- "PutObjectACL": "PutObjectAcl",
- "PutObjectACLPages": "PutObjectAclPages",
- "PutObjectACLRequest": "PutObjectAclRequest",
- },
- shapes: map[string]string{
- "DeleteBucketCORSInput": "DeleteBucketCorsInput",
- "DeleteBucketCORSOutput": "DeleteBucketCorsOutput",
- "GetBucketACLInput": "GetBucketAclInput",
- "GetBucketACLOutput": "GetBucketAclOutput",
- "GetBucketCORSInput": "GetBucketCorsInput",
- "GetBucketCORSOutput": "GetBucketCorsOutput",
- "GetObjectACLInput": "GetObjectAclInput",
- "GetObjectACLOutput": "GetObjectAclOutput",
- "PutBucketACLInput": "PutBucketAclInput",
- "PutBucketACLOutput": "PutBucketAclOutput",
- "PutBucketCORSInput": "PutBucketCorsInput",
- "PutBucketCORSOutput": "PutBucketCorsOutput",
- "PutObjectACLInput": "PutObjectAclInput",
- "PutObjectACLOutput": "PutObjectAclOutput",
- },
- fields: map[string]string{
- "CopySourceVersionID": "CopySourceVersionId",
- "DeleteMarkerVersionID": "DeleteMarkerVersionId",
- "HTTPErrorCodeReturnedEquals": "HttpErrorCodeReturnedEquals",
- "HTTPRedirectCode": "HttpRedirectCode",
- "ID": "Id",
- "LambdaFunctionARN": "LambdaFunctionArn",
- "NextUploadIDMarker": "NextUploadIdMarker",
- "NextVersionIDMarker": "NextVersionIdMarker",
- "QueueARN": "QueueArn",
- "SSEKMSKeyID": "SSEKMSKeyId",
- "TopicARN": "TopicArn",
- "UploadID": "UploadId",
- "UploadIDMarker": "UploadIdMarker",
- "VersionID": "VersionId",
- "VersionIDMarker": "VersionIdMarker",
- },
- },
- "github.com/aws/aws-sdk-go/service/s3/s3manager": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "UploadID": "UploadId",
- },
- },
- "github.com/aws/aws-sdk-go/service/s3/s3iface": {
- operations: map[string]string{
- "DeleteBucketCORS": "DeleteBucketCors",
- "DeleteBucketCORSPages": "DeleteBucketCorsPages",
- "DeleteBucketCORSRequest": "DeleteBucketCorsRequest",
- "GetBucketACL": "GetBucketAcl",
- "GetBucketACLPages": "GetBucketAclPages",
- "GetBucketACLRequest": "GetBucketAclRequest",
- "GetBucketCORS": "GetBucketCors",
- "GetBucketCORSPages": "GetBucketCorsPages",
- "GetBucketCORSRequest": "GetBucketCorsRequest",
- "GetObjectACL": "GetObjectAcl",
- "GetObjectACLPages": "GetObjectAclPages",
- "GetObjectACLRequest": "GetObjectAclRequest",
- "PutBucketACL": "PutBucketAcl",
- "PutBucketACLPages": "PutBucketAclPages",
- "PutBucketACLRequest": "PutBucketAclRequest",
- "PutBucketCORS": "PutBucketCors",
- "PutBucketCORSPages": "PutBucketCorsPages",
- "PutBucketCORSRequest": "PutBucketCorsRequest",
- "PutObjectACL": "PutObjectAcl",
- "PutObjectACLPages": "PutObjectAclPages",
- "PutObjectACLRequest": "PutObjectAclRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/ses": {
- operations: map[string]string{
- "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes",
- "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages",
- "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest",
- "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled",
- "SetIdentityDKIMEnabledPages": "SetIdentityDkimEnabledPages",
- "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest",
- "VerifyDomainDKIM": "VerifyDomainDkim",
- "VerifyDomainDKIMPages": "VerifyDomainDkimPages",
- "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest",
- },
- shapes: map[string]string{
- "GetIdentityDKIMAttributesInput": "GetIdentityDkimAttributesInput",
- "GetIdentityDKIMAttributesOutput": "GetIdentityDkimAttributesOutput",
- "IdentityDKIMAttributes": "IdentityDkimAttributes",
- "SetIdentityDKIMEnabledInput": "SetIdentityDkimEnabledInput",
- "SetIdentityDKIMEnabledOutput": "SetIdentityDkimEnabledOutput",
- "VerifyDomainDKIMInput": "VerifyDomainDkimInput",
- "VerifyDomainDKIMOutput": "VerifyDomainDkimOutput",
- },
- fields: map[string]string{
- "BCCAddresses": "BccAddresses",
- "CCAddresses": "CcAddresses",
- "DKIMAttributes": "DkimAttributes",
- "DKIMEnabled": "DkimEnabled",
- "DKIMTokens": "DkimTokens",
- "DKIMVerificationStatus": "DkimVerificationStatus",
- "FromARN": "FromArn",
- "HTML": "Html",
- "MessageID": "MessageId",
- "ReturnPathARN": "ReturnPathArn",
- "SNSTopic": "SnsTopic",
- "SourceARN": "SourceArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/ses/sesiface": {
- operations: map[string]string{
- "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes",
- "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages",
- "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest",
- "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled",
- "SetIdentityDKIMEnabledPages": "SetIdentityDkimEnabledPages",
- "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest",
- "VerifyDomainDKIM": "VerifyDomainDkim",
- "VerifyDomainDKIMPages": "VerifyDomainDkimPages",
- "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/sns": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AWSAccountID": "AWSAccountId",
- "EndpointARN": "EndpointArn",
- "MessageID": "MessageId",
- "PlatformApplicationARN": "PlatformApplicationArn",
- "SubscriptionARN": "SubscriptionArn",
- "TargetARN": "TargetArn",
- "TopicARN": "TopicArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/sns/snsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/sqs": {
- operations: map[string]string{
- "GetQueueURL": "GetQueueUrl",
- "GetQueueURLPages": "GetQueueUrlPages",
- "GetQueueURLRequest": "GetQueueUrlRequest",
- },
- shapes: map[string]string{
- "GetQueueURLInput": "GetQueueUrlInput",
- "GetQueueURLOutput": "GetQueueUrlOutput",
- },
- fields: map[string]string{
- "AWSAccountIDs": "AWSAccountIds",
- "ID": "Id",
- "MessageID": "MessageId",
- "QueueOwnerAWSAccountID": "QueueOwnerAWSAccountId",
- "QueueURL": "QueueUrl",
- "QueueURLs": "QueueUrls",
- },
- },
- "github.com/aws/aws-sdk-go/service/sqs/sqsiface": {
- operations: map[string]string{
- "GetQueueURL": "GetQueueUrl",
- "GetQueueURLPages": "GetQueueUrlPages",
- "GetQueueURLRequest": "GetQueueUrlRequest",
- },
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/ssm": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "InstanceID": "InstanceId",
- "SHA1": "Sha1",
- },
- },
- "github.com/aws/aws-sdk-go/service/ssm/ssmiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/storagegateway": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "DiskID": "DiskId",
- "DiskIDs": "DiskIds",
- "GatewayID": "GatewayId",
- "IPV4Address": "Ipv4Address",
- "IPV6Address": "Ipv6Address",
- "MACAddress": "MacAddress",
- "NetworkInterfaceID": "NetworkInterfaceId",
- "SnapshotID": "SnapshotId",
- "SourceSnapshotID": "SourceSnapshotId",
- "VolumeDiskID": "VolumeDiskId",
- "VolumeID": "VolumeId",
- },
- },
- "github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/sts": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ARN": "Arn",
- "AccessKeyID": "AccessKeyId",
- "AssumedRoleID": "AssumedRoleId",
- "ExternalID": "ExternalId",
- "FederatedUserID": "FederatedUserId",
- "PrincipalARN": "PrincipalArn",
- "ProviderID": "ProviderId",
- "RoleARN": "RoleArn",
- },
- },
- "github.com/aws/aws-sdk-go/service/sts/stsiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/support": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "AttachmentID": "AttachmentId",
- "AttachmentSetID": "AttachmentSetId",
- "CCEmailAddresses": "CcEmailAddresses",
- "CaseID": "CaseId",
- "CaseIDList": "CaseIdList",
- "CheckID": "CheckId",
- "CheckIDs": "CheckIds",
- "DisplayID": "DisplayId",
- "ID": "Id",
- "ResourceID": "ResourceId",
- },
- },
- "github.com/aws/aws-sdk-go/service/support/supportiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/swf": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "ActivityID": "ActivityId",
- "ContinuedExecutionRunID": "ContinuedExecutionRunId",
- "DecisionTaskCompletedEventID": "DecisionTaskCompletedEventId",
- "EventID": "EventId",
- "ExternalInitiatedEventID": "ExternalInitiatedEventId",
- "ID": "Id",
- "InitiatedEventID": "InitiatedEventId",
- "LatestCancelRequestedEventID": "LatestCancelRequestedEventId",
- "NewExecutionRunID": "NewExecutionRunId",
- "ParentInitiatedEventID": "ParentInitiatedEventId",
- "PreviousStartedEventID": "PreviousStartedEventId",
- "RunID": "RunId",
- "ScheduledEventID": "ScheduledEventId",
- "StartedEventID": "StartedEventId",
- "TimerID": "TimerId",
- "WorkflowID": "WorkflowId",
- },
- },
- "github.com/aws/aws-sdk-go/service/swf/swfiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
- "github.com/aws/aws-sdk-go/service/workspaces": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{
- "BundleID": "BundleId",
- "BundleIDs": "BundleIds",
- "CustomSecurityGroupID": "CustomSecurityGroupId",
- "DNSIPAddresses": "DnsIpAddresses",
- "DefaultOU": "DefaultOu",
- "DirectoryID": "DirectoryId",
- "DirectoryIDs": "DirectoryIds",
- "IAMRoleID": "IamRoleId",
- "IPAddress": "IpAddress",
- "SubnetID": "SubnetId",
- "SubnetIDs": "SubnetIds",
- "WorkspaceID": "WorkspaceId",
- "WorkspaceIDs": "WorkspaceIds",
- "WorkspaceSecurityGroupID": "WorkspaceSecurityGroupId",
- },
- },
- "github.com/aws/aws-sdk-go/service/workspaces/workspacesiface": {
- operations: map[string]string{},
- shapes: map[string]string{},
- fields: map[string]string{},
- },
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go
deleted file mode 100644
index ab516b8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build go1.5,deprecated
-
-package main
-
-//go:generate go run -tags deprecated gen/gen.go
-
-import (
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- "github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename"
-)
-
-var safeTag = "4e554f77f00d527b452c68a46f2e68595284121b"
-
-func main() {
- gopath := os.Getenv("GOPATH")
- if gopath == "" {
- panic("GOPATH not set!")
- }
- gopath = strings.Split(gopath, ":")[0]
-
- // change directory to SDK
- err := os.Chdir(filepath.Join(gopath, "src", "github.com", "aws", "aws-sdk-go"))
- if err != nil {
- panic("Cannot find SDK repository")
- }
-
- // store orig HEAD
- head, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output()
- if err != nil {
- panic("Cannot find SDK repository")
- }
- origHEAD := strings.Trim(string(head), " \r\n")
-
- // checkout to safe tag and run conversion
- exec.Command("git", "checkout", safeTag).Run()
- defer func() {
- exec.Command("git", "checkout", origHEAD).Run()
- }()
-
- rename.ParsePathsFromArgs()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/PATENTS b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/PATENTS
deleted file mode 100644
index 7330990..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
deleted file mode 100644
index 340c9e6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package astutil
-
-// This file defines utilities for working with source positions.
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "sort"
-)
-
-// PathEnclosingInterval returns the node that encloses the source
-// interval [start, end), and all its ancestors up to the AST root.
-//
-// The definition of "enclosing" used by this function considers
-// additional whitespace abutting a node to be enclosed by it.
-// In this example:
-//
-// z := x + y // add them
-// <-A->
-// <----B----->
-//
-// the ast.BinaryExpr(+) node is considered to enclose interval B
-// even though its [Pos()..End()) is actually only interval A.
-// This behaviour makes user interfaces more tolerant of imperfect
-// input.
-//
-// This function treats tokens as nodes, though they are not included
-// in the result. e.g. PathEnclosingInterval("+") returns the
-// enclosing ast.BinaryExpr("x + y").
-//
-// If start==end, the 1-char interval following start is used instead.
-//
-// The 'exact' result is true if the interval contains only path[0]
-// and perhaps some adjacent whitespace. It is false if the interval
-// overlaps multiple children of path[0], or if it contains only
-// interior whitespace of path[0].
-// In this example:
-//
-// z := x + y // add them
-// <--C--> <---E-->
-// ^
-// D
-//
-// intervals C, D and E are inexact. C is contained by the
-// z-assignment statement, because it spans three of its children (:=,
-// x, +). So too is the 1-char interval D, because it contains only
-// interior whitespace of the assignment. E is considered interior
-// whitespace of the BlockStmt containing the assignment.
-//
-// Precondition: [start, end) both lie within the same file as root.
-// TODO(adonovan): return (nil, false) in this case and remove precond.
-// Requires FileSet; see loader.tokenFileContainsPos.
-//
-// Postcondition: path is never nil; it always contains at least 'root'.
-//
-func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
- // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
-
- // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
- var visit func(node ast.Node) bool
- visit = func(node ast.Node) bool {
- path = append(path, node)
-
- nodePos := node.Pos()
- nodeEnd := node.End()
-
- // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
-
- // Intersect [start, end) with interval of node.
- if start < nodePos {
- start = nodePos
- }
- if end > nodeEnd {
- end = nodeEnd
- }
-
- // Find sole child that contains [start, end).
- children := childrenOf(node)
- l := len(children)
- for i, child := range children {
- // [childPos, childEnd) is unaugmented interval of child.
- childPos := child.Pos()
- childEnd := child.End()
-
- // [augPos, augEnd) is whitespace-augmented interval of child.
- augPos := childPos
- augEnd := childEnd
- if i > 0 {
- augPos = children[i-1].End() // start of preceding whitespace
- }
- if i < l-1 {
- nextChildPos := children[i+1].Pos()
- // Does [start, end) lie between child and next child?
- if start >= augEnd && end <= nextChildPos {
- return false // inexact match
- }
- augEnd = nextChildPos // end of following whitespace
- }
-
- // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
- // i, augPos, augEnd, start, end) // debugging
-
- // Does augmented child strictly contain [start, end)?
- if augPos <= start && end <= augEnd {
- _, isToken := child.(tokenNode)
- return isToken || visit(child)
- }
-
- // Does [start, end) overlap multiple children?
- // i.e. left-augmented child contains start
- // but LR-augmented child does not contain end.
- if start < childEnd && end > augEnd {
- break
- }
- }
-
- // No single child contained [start, end),
- // so node is the result. Is it exact?
-
- // (It's tempting to put this condition before the
- // child loop, but it gives the wrong result in the
- // case where a node (e.g. ExprStmt) and its sole
- // child have equal intervals.)
- if start == nodePos && end == nodeEnd {
- return true // exact match
- }
-
- return false // inexact: overlaps multiple children
- }
-
- if start > end {
- start, end = end, start
- }
-
- if start < root.End() && end > root.Pos() {
- if start == end {
- end = start + 1 // empty interval => interval of size 1
- }
- exact = visit(root)
-
- // Reverse the path:
- for i, l := 0, len(path); i < l/2; i++ {
- path[i], path[l-1-i] = path[l-1-i], path[i]
- }
- } else {
- // Selection lies within whitespace preceding the
- // first (or following the last) declaration in the file.
- // The result nonetheless always includes the ast.File.
- path = append(path, root)
- }
-
- return
-}
-
-// tokenNode is a dummy implementation of ast.Node for a single token.
-// They are used transiently by PathEnclosingInterval but never escape
-// this package.
-//
-type tokenNode struct {
- pos token.Pos
- end token.Pos
-}
-
-func (n tokenNode) Pos() token.Pos {
- return n.pos
-}
-
-func (n tokenNode) End() token.Pos {
- return n.end
-}
-
-func tok(pos token.Pos, len int) ast.Node {
- return tokenNode{pos, pos + token.Pos(len)}
-}
-
-// childrenOf returns the direct non-nil children of ast.Node n.
-// It may include fake ast.Node implementations for bare tokens.
-// it is not safe to call (e.g.) ast.Walk on such nodes.
-//
-func childrenOf(n ast.Node) []ast.Node {
- var children []ast.Node
-
- // First add nodes for all true subtrees.
- ast.Inspect(n, func(node ast.Node) bool {
- if node == n { // push n
- return true // recur
- }
- if node != nil { // push child
- children = append(children, node)
- }
- return false // no recursion
- })
-
- // Then add fake Nodes for bare tokens.
- switch n := n.(type) {
- case *ast.ArrayType:
- children = append(children,
- tok(n.Lbrack, len("[")),
- tok(n.Elt.End(), len("]")))
-
- case *ast.AssignStmt:
- children = append(children,
- tok(n.TokPos, len(n.Tok.String())))
-
- case *ast.BasicLit:
- children = append(children,
- tok(n.ValuePos, len(n.Value)))
-
- case *ast.BinaryExpr:
- children = append(children, tok(n.OpPos, len(n.Op.String())))
-
- case *ast.BlockStmt:
- children = append(children,
- tok(n.Lbrace, len("{")),
- tok(n.Rbrace, len("}")))
-
- case *ast.BranchStmt:
- children = append(children,
- tok(n.TokPos, len(n.Tok.String())))
-
- case *ast.CallExpr:
- children = append(children,
- tok(n.Lparen, len("(")),
- tok(n.Rparen, len(")")))
- if n.Ellipsis != 0 {
- children = append(children, tok(n.Ellipsis, len("...")))
- }
-
- case *ast.CaseClause:
- if n.List == nil {
- children = append(children,
- tok(n.Case, len("default")))
- } else {
- children = append(children,
- tok(n.Case, len("case")))
- }
- children = append(children, tok(n.Colon, len(":")))
-
- case *ast.ChanType:
- switch n.Dir {
- case ast.RECV:
- children = append(children, tok(n.Begin, len("<-chan")))
- case ast.SEND:
- children = append(children, tok(n.Begin, len("chan<-")))
- case ast.RECV | ast.SEND:
- children = append(children, tok(n.Begin, len("chan")))
- }
-
- case *ast.CommClause:
- if n.Comm == nil {
- children = append(children,
- tok(n.Case, len("default")))
- } else {
- children = append(children,
- tok(n.Case, len("case")))
- }
- children = append(children, tok(n.Colon, len(":")))
-
- case *ast.Comment:
- // nop
-
- case *ast.CommentGroup:
- // nop
-
- case *ast.CompositeLit:
- children = append(children,
- tok(n.Lbrace, len("{")),
- tok(n.Rbrace, len("{")))
-
- case *ast.DeclStmt:
- // nop
-
- case *ast.DeferStmt:
- children = append(children,
- tok(n.Defer, len("defer")))
-
- case *ast.Ellipsis:
- children = append(children,
- tok(n.Ellipsis, len("...")))
-
- case *ast.EmptyStmt:
- // nop
-
- case *ast.ExprStmt:
- // nop
-
- case *ast.Field:
- // TODO(adonovan): Field.{Doc,Comment,Tag}?
-
- case *ast.FieldList:
- children = append(children,
- tok(n.Opening, len("(")),
- tok(n.Closing, len(")")))
-
- case *ast.File:
- // TODO test: Doc
- children = append(children,
- tok(n.Package, len("package")))
-
- case *ast.ForStmt:
- children = append(children,
- tok(n.For, len("for")))
-
- case *ast.FuncDecl:
- // TODO(adonovan): FuncDecl.Comment?
-
- // Uniquely, FuncDecl breaks the invariant that
- // preorder traversal yields tokens in lexical order:
- // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
- //
- // As a workaround, we inline the case for FuncType
- // here and order things correctly.
- //
- children = nil // discard ast.Walk(FuncDecl) info subtrees
- children = append(children, tok(n.Type.Func, len("func")))
- if n.Recv != nil {
- children = append(children, n.Recv)
- }
- children = append(children, n.Name)
- if n.Type.Params != nil {
- children = append(children, n.Type.Params)
- }
- if n.Type.Results != nil {
- children = append(children, n.Type.Results)
- }
- if n.Body != nil {
- children = append(children, n.Body)
- }
-
- case *ast.FuncLit:
- // nop
-
- case *ast.FuncType:
- if n.Func != 0 {
- children = append(children,
- tok(n.Func, len("func")))
- }
-
- case *ast.GenDecl:
- children = append(children,
- tok(n.TokPos, len(n.Tok.String())))
- if n.Lparen != 0 {
- children = append(children,
- tok(n.Lparen, len("(")),
- tok(n.Rparen, len(")")))
- }
-
- case *ast.GoStmt:
- children = append(children,
- tok(n.Go, len("go")))
-
- case *ast.Ident:
- children = append(children,
- tok(n.NamePos, len(n.Name)))
-
- case *ast.IfStmt:
- children = append(children,
- tok(n.If, len("if")))
-
- case *ast.ImportSpec:
- // TODO(adonovan): ImportSpec.{Doc,EndPos}?
-
- case *ast.IncDecStmt:
- children = append(children,
- tok(n.TokPos, len(n.Tok.String())))
-
- case *ast.IndexExpr:
- children = append(children,
- tok(n.Lbrack, len("{")),
- tok(n.Rbrack, len("}")))
-
- case *ast.InterfaceType:
- children = append(children,
- tok(n.Interface, len("interface")))
-
- case *ast.KeyValueExpr:
- children = append(children,
- tok(n.Colon, len(":")))
-
- case *ast.LabeledStmt:
- children = append(children,
- tok(n.Colon, len(":")))
-
- case *ast.MapType:
- children = append(children,
- tok(n.Map, len("map")))
-
- case *ast.ParenExpr:
- children = append(children,
- tok(n.Lparen, len("(")),
- tok(n.Rparen, len(")")))
-
- case *ast.RangeStmt:
- children = append(children,
- tok(n.For, len("for")),
- tok(n.TokPos, len(n.Tok.String())))
-
- case *ast.ReturnStmt:
- children = append(children,
- tok(n.Return, len("return")))
-
- case *ast.SelectStmt:
- children = append(children,
- tok(n.Select, len("select")))
-
- case *ast.SelectorExpr:
- // nop
-
- case *ast.SendStmt:
- children = append(children,
- tok(n.Arrow, len("<-")))
-
- case *ast.SliceExpr:
- children = append(children,
- tok(n.Lbrack, len("[")),
- tok(n.Rbrack, len("]")))
-
- case *ast.StarExpr:
- children = append(children, tok(n.Star, len("*")))
-
- case *ast.StructType:
- children = append(children, tok(n.Struct, len("struct")))
-
- case *ast.SwitchStmt:
- children = append(children, tok(n.Switch, len("switch")))
-
- case *ast.TypeAssertExpr:
- children = append(children,
- tok(n.Lparen-1, len(".")),
- tok(n.Lparen, len("(")),
- tok(n.Rparen, len(")")))
-
- case *ast.TypeSpec:
- // TODO(adonovan): TypeSpec.{Doc,Comment}?
-
- case *ast.TypeSwitchStmt:
- children = append(children, tok(n.Switch, len("switch")))
-
- case *ast.UnaryExpr:
- children = append(children, tok(n.OpPos, len(n.Op.String())))
-
- case *ast.ValueSpec:
- // TODO(adonovan): ValueSpec.{Doc,Comment}?
-
- case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
- // nop
- }
-
- // TODO(adonovan): opt: merge the logic of ast.Inspect() into
- // the switch above so we can make interleaved callbacks for
- // both Nodes and Tokens in the right order and avoid the need
- // to sort.
- sort.Sort(byPos(children))
-
- return children
-}
-
-type byPos []ast.Node
-
-func (sl byPos) Len() int {
- return len(sl)
-}
-func (sl byPos) Less(i, j int) bool {
- return sl[i].Pos() < sl[j].Pos()
-}
-func (sl byPos) Swap(i, j int) {
- sl[i], sl[j] = sl[j], sl[i]
-}
-
-// NodeDescription returns a description of the concrete type of n suitable
-// for a user interface.
-//
-// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
-// StarExpr) we could be much more specific given the path to the AST
-// root. Perhaps we should do that.
-//
-func NodeDescription(n ast.Node) string {
- switch n := n.(type) {
- case *ast.ArrayType:
- return "array type"
- case *ast.AssignStmt:
- return "assignment"
- case *ast.BadDecl:
- return "bad declaration"
- case *ast.BadExpr:
- return "bad expression"
- case *ast.BadStmt:
- return "bad statement"
- case *ast.BasicLit:
- return "basic literal"
- case *ast.BinaryExpr:
- return fmt.Sprintf("binary %s operation", n.Op)
- case *ast.BlockStmt:
- return "block"
- case *ast.BranchStmt:
- switch n.Tok {
- case token.BREAK:
- return "break statement"
- case token.CONTINUE:
- return "continue statement"
- case token.GOTO:
- return "goto statement"
- case token.FALLTHROUGH:
- return "fall-through statement"
- }
- case *ast.CallExpr:
- return "function call (or conversion)"
- case *ast.CaseClause:
- return "case clause"
- case *ast.ChanType:
- return "channel type"
- case *ast.CommClause:
- return "communication clause"
- case *ast.Comment:
- return "comment"
- case *ast.CommentGroup:
- return "comment group"
- case *ast.CompositeLit:
- return "composite literal"
- case *ast.DeclStmt:
- return NodeDescription(n.Decl) + " statement"
- case *ast.DeferStmt:
- return "defer statement"
- case *ast.Ellipsis:
- return "ellipsis"
- case *ast.EmptyStmt:
- return "empty statement"
- case *ast.ExprStmt:
- return "expression statement"
- case *ast.Field:
- // Can be any of these:
- // struct {x, y int} -- struct field(s)
- // struct {T} -- anon struct field
- // interface {I} -- interface embedding
- // interface {f()} -- interface method
- // func (A) func(B) C -- receiver, param(s), result(s)
- return "field/method/parameter"
- case *ast.FieldList:
- return "field/method/parameter list"
- case *ast.File:
- return "source file"
- case *ast.ForStmt:
- return "for loop"
- case *ast.FuncDecl:
- return "function declaration"
- case *ast.FuncLit:
- return "function literal"
- case *ast.FuncType:
- return "function type"
- case *ast.GenDecl:
- switch n.Tok {
- case token.IMPORT:
- return "import declaration"
- case token.CONST:
- return "constant declaration"
- case token.TYPE:
- return "type declaration"
- case token.VAR:
- return "variable declaration"
- }
- case *ast.GoStmt:
- return "go statement"
- case *ast.Ident:
- return "identifier"
- case *ast.IfStmt:
- return "if statement"
- case *ast.ImportSpec:
- return "import specification"
- case *ast.IncDecStmt:
- if n.Tok == token.INC {
- return "increment statement"
- }
- return "decrement statement"
- case *ast.IndexExpr:
- return "index expression"
- case *ast.InterfaceType:
- return "interface type"
- case *ast.KeyValueExpr:
- return "key/value association"
- case *ast.LabeledStmt:
- return "statement label"
- case *ast.MapType:
- return "map type"
- case *ast.Package:
- return "package"
- case *ast.ParenExpr:
- return "parenthesized " + NodeDescription(n.X)
- case *ast.RangeStmt:
- return "range loop"
- case *ast.ReturnStmt:
- return "return statement"
- case *ast.SelectStmt:
- return "select statement"
- case *ast.SelectorExpr:
- return "selector"
- case *ast.SendStmt:
- return "channel send"
- case *ast.SliceExpr:
- return "slice expression"
- case *ast.StarExpr:
- return "*-operation" // load/store expr or pointer type
- case *ast.StructType:
- return "struct type"
- case *ast.SwitchStmt:
- return "switch statement"
- case *ast.TypeAssertExpr:
- return "type assertion"
- case *ast.TypeSpec:
- return "type specification"
- case *ast.TypeSwitchStmt:
- return "type switch"
- case *ast.UnaryExpr:
- return fmt.Sprintf("unary %s operation", n.Op)
- case *ast.ValueSpec:
- return "value specification"
-
- }
- panic(fmt.Sprintf("unexpected node type: %T", n))
-}
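
The file removed above is the vendored copy of astutil's PathEnclosingInterval and NodeDescription helpers. For reviewers unfamiliar with that API, here is a minimal, hypothetical sketch of typical usage, assuming the upstream golang.org/x/tools/go/ast/astutil package rather than this vendored copy:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := `package p; func add(x, y int) int { return x + y }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Pick a position inside the "x + y" expression and walk the
	// enclosing path from the innermost node up to the *ast.File.
	pos := f.Pos() + token.Pos(len("package p; func add(x, y int) int { return "))
	path, exact := astutil.PathEnclosingInterval(f, pos, pos+1)
	for _, n := range path {
		fmt.Printf("%-22T %s\n", n, astutil.NodeDescription(n))
	}
	fmt.Println("exact:", exact)
}
```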
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/imports.go
deleted file mode 100644
index a47bcfa..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package astutil contains common utilities for working with the Go AST.
-package astutil
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "strconv"
- "strings"
-)
-
-// AddImport adds the import path to the file f, if absent.
-func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
- return AddNamedImport(fset, f, "", ipath)
-}
-
-// AddNamedImport adds the import path to the file f, if absent.
-// If name is not empty, it is used to rename the import.
-//
-// For example, calling
-// AddNamedImport(fset, f, "pathpkg", "path")
-// adds
-// import pathpkg "path"
-func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
- if imports(f, ipath) {
- return false
- }
-
- newImport := &ast.ImportSpec{
- Path: &ast.BasicLit{
- Kind: token.STRING,
- Value: strconv.Quote(ipath),
- },
- }
- if name != "" {
- newImport.Name = &ast.Ident{Name: name}
- }
-
- // Find an import decl to add to.
- // The goal is to find an existing import
- // whose import path has the longest shared
- // prefix with ipath.
- var (
- bestMatch = -1 // length of longest shared prefix
- lastImport = -1 // index in f.Decls of the file's final import decl
- impDecl *ast.GenDecl // import decl containing the best match
- impIndex = -1 // spec index in impDecl containing the best match
- )
- for i, decl := range f.Decls {
- gen, ok := decl.(*ast.GenDecl)
- if ok && gen.Tok == token.IMPORT {
- lastImport = i
- // Do not add to import "C", to avoid disrupting the
- // association with its doc comment, breaking cgo.
- if declImports(gen, "C") {
- continue
- }
-
- // Match an empty import decl if that's all that is available.
- if len(gen.Specs) == 0 && bestMatch == -1 {
- impDecl = gen
- }
-
- // Compute longest shared prefix with imports in this group.
- for j, spec := range gen.Specs {
- impspec := spec.(*ast.ImportSpec)
- n := matchLen(importPath(impspec), ipath)
- if n > bestMatch {
- bestMatch = n
- impDecl = gen
- impIndex = j
- }
- }
- }
- }
-
- // If no import decl found, add one after the last import.
- if impDecl == nil {
- impDecl = &ast.GenDecl{
- Tok: token.IMPORT,
- }
- if lastImport >= 0 {
- impDecl.TokPos = f.Decls[lastImport].End()
- } else {
- // There are no existing imports.
- // Our new import goes after the package declaration and after
- // the comment, if any, that starts on the same line as the
- // package declaration.
- impDecl.TokPos = f.Package
-
- file := fset.File(f.Package)
- pkgLine := file.Line(f.Package)
- for _, c := range f.Comments {
- if file.Line(c.Pos()) > pkgLine {
- break
- }
- impDecl.TokPos = c.End()
- }
- }
- f.Decls = append(f.Decls, nil)
- copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
- f.Decls[lastImport+1] = impDecl
- }
-
- // Insert new import at insertAt.
- insertAt := 0
- if impIndex >= 0 {
- // insert after the found import
- insertAt = impIndex + 1
- }
- impDecl.Specs = append(impDecl.Specs, nil)
- copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
- impDecl.Specs[insertAt] = newImport
- pos := impDecl.Pos()
- if insertAt > 0 {
- // If there is a comment after an existing import, preserve the comment
- // position by adding the new import after the comment.
- if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
- pos = spec.Comment.End()
- } else {
- // Assign same position as the previous import,
- // so that the sorter sees it as being in the same block.
- pos = impDecl.Specs[insertAt-1].Pos()
- }
- }
- if newImport.Name != nil {
- newImport.Name.NamePos = pos
- }
- newImport.Path.ValuePos = pos
- newImport.EndPos = pos
-
- // Clean up parens. impDecl contains at least one spec.
- if len(impDecl.Specs) == 1 {
- // Remove unneeded parens.
- impDecl.Lparen = token.NoPos
- } else if !impDecl.Lparen.IsValid() {
- // impDecl needs parens added.
- impDecl.Lparen = impDecl.Specs[0].Pos()
- }
-
- f.Imports = append(f.Imports, newImport)
-
- if len(f.Decls) <= 1 {
- return true
- }
-
- // Merge all the import declarations into the first one.
- var first *ast.GenDecl
- for i, decl := range f.Decls {
- gen, ok := decl.(*ast.GenDecl)
- if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
- continue
- }
- if first == nil {
- first = gen
- continue // Don't touch the first one.
- }
- // Move the imports of the other import declaration to the first one.
- for _, spec := range gen.Specs {
- spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
- first.Specs = append(first.Specs, spec)
- }
- f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
- }
-
- return true
-}
-
-// DeleteImport deletes the import path from the file f, if present.
-func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
- return DeleteNamedImport(fset, f, "", path)
-}
-
-// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
-func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
- var delspecs []*ast.ImportSpec
-
- // Find the import nodes that import path, if any.
- for i := 0; i < len(f.Decls); i++ {
- decl := f.Decls[i]
- gen, ok := decl.(*ast.GenDecl)
- if !ok || gen.Tok != token.IMPORT {
- continue
- }
- for j := 0; j < len(gen.Specs); j++ {
- spec := gen.Specs[j]
- impspec := spec.(*ast.ImportSpec)
- if impspec.Name == nil && name != "" {
- continue
- }
- if impspec.Name != nil && impspec.Name.Name != name {
- continue
- }
- if importPath(impspec) != path {
- continue
- }
-
- // We found an import spec that imports path.
- // Delete it.
- delspecs = append(delspecs, impspec)
- deleted = true
- copy(gen.Specs[j:], gen.Specs[j+1:])
- gen.Specs = gen.Specs[:len(gen.Specs)-1]
-
- // If this was the last import spec in this decl,
- // delete the decl, too.
- if len(gen.Specs) == 0 {
- copy(f.Decls[i:], f.Decls[i+1:])
- f.Decls = f.Decls[:len(f.Decls)-1]
- i--
- break
- } else if len(gen.Specs) == 1 {
- gen.Lparen = token.NoPos // drop parens
- }
- if j > 0 {
- lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
- lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
- line := fset.Position(impspec.Path.ValuePos).Line
-
- // We deleted an entry but now there may be
- // a blank line-sized hole where the import was.
- if line-lastLine > 1 {
- // There was a blank line immediately preceding the deleted import,
- // so there's no need to close the hole.
- // Do nothing.
- } else {
- // There was no blank line. Close the hole.
- fset.File(gen.Rparen).MergeLine(line)
- }
- }
- j--
- }
- }
-
- // Delete them from f.Imports.
- for i := 0; i < len(f.Imports); i++ {
- imp := f.Imports[i]
- for j, del := range delspecs {
- if imp == del {
- copy(f.Imports[i:], f.Imports[i+1:])
- f.Imports = f.Imports[:len(f.Imports)-1]
- copy(delspecs[j:], delspecs[j+1:])
- delspecs = delspecs[:len(delspecs)-1]
- i--
- break
- }
- }
- }
-
- if len(delspecs) > 0 {
- panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
- }
-
- return
-}
-
-// RewriteImport rewrites any import of path oldPath to path newPath.
-func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
- for _, imp := range f.Imports {
- if importPath(imp) == oldPath {
- rewrote = true
- // record old End, because the default is to compute
- // it using the length of imp.Path.Value.
- imp.EndPos = imp.End()
- imp.Path.Value = strconv.Quote(newPath)
- }
- }
- return
-}
-
-// UsesImport reports whether a given import is used.
-func UsesImport(f *ast.File, path string) (used bool) {
- spec := importSpec(f, path)
- if spec == nil {
- return
- }
-
- name := spec.Name.String()
- switch name {
- case "":
- // If the package name is not explicitly specified,
- // make an educated guess. This is not guaranteed to be correct.
- lastSlash := strings.LastIndex(path, "/")
- if lastSlash == -1 {
- name = path
- } else {
- name = path[lastSlash+1:]
- }
- case "_", ".":
- // Not sure if this import is used - err on the side of caution.
- return true
- }
-
- ast.Walk(visitFn(func(n ast.Node) {
- sel, ok := n.(*ast.SelectorExpr)
- if ok && isTopName(sel.X, name) {
- used = true
- }
- }), f)
-
- return
-}
-
-type visitFn func(node ast.Node)
-
-func (fn visitFn) Visit(node ast.Node) ast.Visitor {
- fn(node)
- return fn
-}
-
-// imports returns true if f imports path.
-func imports(f *ast.File, path string) bool {
- return importSpec(f, path) != nil
-}
-
-// importSpec returns the import spec if f imports path,
-// or nil otherwise.
-func importSpec(f *ast.File, path string) *ast.ImportSpec {
- for _, s := range f.Imports {
- if importPath(s) == path {
- return s
- }
- }
- return nil
-}
-
-// importPath returns the unquoted import path of s,
-// or "" if the path is not properly quoted.
-func importPath(s *ast.ImportSpec) string {
- t, err := strconv.Unquote(s.Path.Value)
- if err == nil {
- return t
- }
- return ""
-}
-
-// declImports reports whether gen contains an import of path.
-func declImports(gen *ast.GenDecl, path string) bool {
- if gen.Tok != token.IMPORT {
- return false
- }
- for _, spec := range gen.Specs {
- impspec := spec.(*ast.ImportSpec)
- if importPath(impspec) == path {
- return true
- }
- }
- return false
-}
-
-// matchLen returns the length of the longest path segment prefix shared by x and y.
-func matchLen(x, y string) int {
- n := 0
- for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
- if x[i] == '/' {
- n++
- }
- }
- return n
-}
-
-// isTopName returns true if n is a top-level unresolved identifier with the given name.
-func isTopName(n ast.Expr, name string) bool {
- id, ok := n.(*ast.Ident)
- return ok && id.Name == name && id.Obj == nil
-}
-
-// Imports returns the file imports grouped by paragraph.
-func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
- var groups [][]*ast.ImportSpec
-
- for _, decl := range f.Decls {
- genDecl, ok := decl.(*ast.GenDecl)
- if !ok || genDecl.Tok != token.IMPORT {
- break
- }
-
- group := []*ast.ImportSpec{}
-
- var lastLine int
- for _, spec := range genDecl.Specs {
- importSpec := spec.(*ast.ImportSpec)
- pos := importSpec.Path.ValuePos
- line := fset.Position(pos).Line
- if lastLine > 0 && pos > 0 && line-lastLine > 1 {
- groups = append(groups, group)
- group = []*ast.ImportSpec{}
- }
- group = append(group, importSpec)
- lastLine = line
- }
- groups = append(groups, group)
- }
-
- return groups
-}
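
The deleted imports.go above provides the import-rewriting helpers (AddImport, AddNamedImport, DeleteImport, RewriteImport, UsesImport). A minimal sketch of the usual call pattern, assuming the upstream golang.org/x/tools/go/ast/astutil package:

```go
package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := "package main\n\nimport \"fmt\"\n\nfunc main() { fmt.Println(\"hi\") }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Add a plain and a named import; both are no-ops if already present.
	astutil.AddImport(fset, f, "strings")
	astutil.AddNamedImport(fset, f, "pathpkg", "path")

	// Print the rewritten source with the merged import block.
	format.Node(os.Stdout, fset, f)
}
```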
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/util.go
deleted file mode 100644
index 7630629..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/ast/astutil/util.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package astutil
-
-import "go/ast"
-
-// Unparen returns e with any enclosing parentheses stripped.
-func Unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- return e
- }
- e = p.X
- }
-}
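
Unparen, also removed here, simply strips redundant parentheses from an expression. A tiny illustrative example, again assuming the upstream astutil package:

```go
package main

import (
	"fmt"
	"go/parser"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	expr, err := parser.ParseExpr("((x + y))")
	if err != nil {
		panic(err)
	}
	// Unparen peels off both ParenExprs and returns the inner BinaryExpr.
	fmt.Printf("%T\n", astutil.Unparen(expr)) // *ast.BinaryExpr
}
```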
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/allpackages.go
deleted file mode 100644
index 3020809..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/allpackages.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buildutil provides utilities related to the go/build
-// package in the standard library.
-//
-// All I/O is done via the build.Context file system interface, which must
-// be concurrency-safe.
-package buildutil
-
-import (
- "go/build"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-)
-
-// AllPackages returns the package path of each Go package in any source
-// directory of the specified build context (e.g. $GOROOT or an element
-// of $GOPATH). Errors are ignored. The results are sorted.
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// The result may include import paths for directories that contain no
-// *.go files, such as "archive" (in $GOROOT/src).
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func AllPackages(ctxt *build.Context) []string {
- var list []string
- ForEachPackage(ctxt, func(pkg string, _ error) {
- list = append(list, pkg)
- })
- sort.Strings(list)
- return list
-}
-
-// ForEachPackage calls the found function with the package path of
-// each Go package it finds in any source directory of the specified
-// build context (e.g. $GOROOT or an element of $GOPATH).
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// If the package directory exists but could not be read, the second
-// argument to the found function provides the error.
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
- ch := make(chan item)
-
- var wg sync.WaitGroup
- for _, root := range ctxt.SrcDirs() {
- root := root
- wg.Add(1)
- go func() {
- allPackages(ctxt, root, ch)
- wg.Done()
- }()
- }
- go func() {
- wg.Wait()
- close(ch)
- }()
-
- // All calls to found occur in the caller's goroutine.
- for i := range ch {
- found(i.importPath, i.err)
- }
-}
-
-type item struct {
- importPath string
- err error // (optional)
-}
-
-// We use a process-wide counting semaphore to limit
-// the number of parallel calls to ReadDir.
-var ioLimit = make(chan bool, 20)
-
-func allPackages(ctxt *build.Context, root string, ch chan<- item) {
- root = filepath.Clean(root) + string(os.PathSeparator)
-
- var wg sync.WaitGroup
-
- var walkDir func(dir string)
- walkDir = func(dir string) {
- // Avoid .foo, _foo, and testdata directory trees.
- base := filepath.Base(dir)
- if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
- return
- }
-
- pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
-
- // Prune search if we encounter any of these import paths.
- switch pkg {
- case "builtin":
- return
- }
-
- ioLimit <- true
- files, err := ReadDir(ctxt, dir)
- <-ioLimit
- if pkg != "" || err != nil {
- ch <- item{pkg, err}
- }
- for _, fi := range files {
- fi := fi
- if fi.IsDir() {
- wg.Add(1)
- go func() {
- walkDir(filepath.Join(dir, fi.Name()))
- wg.Done()
- }()
- }
- }
- }
-
- walkDir(root)
- wg.Wait()
-}
-
-// ExpandPatterns returns the set of packages matched by patterns,
-// which may have the following forms:
-//
-// golang.org/x/tools/cmd/guru # a single package
-// golang.org/x/tools/... # all packages beneath dir
-// ... # the entire workspace.
-//
-// Order is significant: a pattern preceded by '-' removes matching
-// packages from the set. For example, these patterns match all encoding
-// packages except encoding/xml:
-//
-// encoding/... -encoding/xml
-//
-func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
- // TODO(adonovan): support other features of 'go list':
- // - "std"/"cmd"/"all" meta-packages
- // - "..." not at the end of a pattern
- // - relative patterns using "./" or "../" prefix
-
- pkgs := make(map[string]bool)
- doPkg := func(pkg string, neg bool) {
- if neg {
- delete(pkgs, pkg)
- } else {
- pkgs[pkg] = true
- }
- }
-
- // Scan entire workspace if wildcards are present.
- // TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
- var all []string
- for _, arg := range patterns {
- if strings.HasSuffix(arg, "...") {
- all = AllPackages(ctxt)
- break
- }
- }
-
- for _, arg := range patterns {
- if arg == "" {
- continue
- }
-
- neg := arg[0] == '-'
- if neg {
- arg = arg[1:]
- }
-
- if arg == "..." {
- // ... matches all packages
- for _, pkg := range all {
- doPkg(pkg, neg)
- }
- } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
- // dir/... matches all packages beneath dir
- for _, pkg := range all {
- if strings.HasPrefix(pkg, dir) &&
- (len(pkg) == len(dir) || pkg[len(dir)] == '/') {
- doPkg(pkg, neg)
- }
- }
- } else {
- // single package
- doPkg(arg, neg)
- }
- }
-
- return pkgs
-}
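
allpackages.go, removed above, enumerates Go packages through a build.Context. A hypothetical usage sketch against the upstream golang.org/x/tools/go/buildutil package:

```go
package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Enumerate every package under GOROOT and GOPATH (can be slow).
	pkgs := buildutil.AllPackages(&build.Default)
	fmt.Println("total packages:", len(pkgs))

	// Expand "go list"-style patterns; a leading '-' removes matches.
	set := buildutil.ExpandPatterns(&build.Default, []string{"encoding/...", "-encoding/xml"})
	fmt.Println("encoding packages (excluding encoding/xml):", len(set))
}
```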
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
deleted file mode 100644
index 24cbcbe..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package buildutil
-
-import (
- "fmt"
- "go/build"
- "io"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "sort"
- "strings"
- "time"
-)
-
-// FakeContext returns a build.Context for the fake file tree specified
-// by pkgs, which maps package import paths to a mapping from file base
-// names to contents.
-//
-// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
-// the necessary file access methods to read from memory instead of the
-// real file system.
-//
-// Unlike a real file tree, the fake one has only two levels---packages
-// and files---so ReadDir("/go/src/") returns all packages under
-// /go/src/ including, for instance, "math" and "math/big".
-// ReadDir("/go/src/math/big") would return all the files in the
-// "math/big" package.
-//
-func FakeContext(pkgs map[string]map[string]string) *build.Context {
- clean := func(filename string) string {
- f := path.Clean(filepath.ToSlash(filename))
- // Removing "/go/src" while respecting segment
- // boundaries has this unfortunate corner case:
- if f == "/go/src" {
- return ""
- }
- return strings.TrimPrefix(f, "/go/src/")
- }
-
- ctxt := build.Default // copy
- ctxt.GOROOT = "/go"
- ctxt.GOPATH = ""
- ctxt.IsDir = func(dir string) bool {
- dir = clean(dir)
- if dir == "" {
- return true // needed by (*build.Context).SrcDirs
- }
- return pkgs[dir] != nil
- }
- ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
- dir = clean(dir)
- var fis []os.FileInfo
- if dir == "" {
- // enumerate packages
- for importPath := range pkgs {
- fis = append(fis, fakeDirInfo(importPath))
- }
- } else {
- // enumerate files of package
- for basename := range pkgs[dir] {
- fis = append(fis, fakeFileInfo(basename))
- }
- }
- sort.Sort(byName(fis))
- return fis, nil
- }
- ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
- filename = clean(filename)
- dir, base := path.Split(filename)
- content, ok := pkgs[path.Clean(dir)][base]
- if !ok {
- return nil, fmt.Errorf("file not found: %s", filename)
- }
- return ioutil.NopCloser(strings.NewReader(content)), nil
- }
- ctxt.IsAbsPath = func(path string) bool {
- path = filepath.ToSlash(path)
- // Don't rely on the default (filepath.Path) since on
- // Windows, it reports virtual paths as non-absolute.
- return strings.HasPrefix(path, "/")
- }
- return &ctxt
-}
-
-type byName []os.FileInfo
-
-func (s byName) Len() int { return len(s) }
-func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
-
-type fakeFileInfo string
-
-func (fi fakeFileInfo) Name() string { return string(fi) }
-func (fakeFileInfo) Sys() interface{} { return nil }
-func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (fakeFileInfo) IsDir() bool { return false }
-func (fakeFileInfo) Size() int64 { return 0 }
-func (fakeFileInfo) Mode() os.FileMode { return 0644 }
-
-type fakeDirInfo string
-
-func (fd fakeDirInfo) Name() string { return string(fd) }
-func (fakeDirInfo) Sys() interface{} { return nil }
-func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
-func (fakeDirInfo) IsDir() bool { return true }
-func (fakeDirInfo) Size() int64 { return 0 }
-func (fakeDirInfo) Mode() os.FileMode { return 0755 }
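
FakeContext, deleted above, backs a build.Context with an in-memory file tree, which is mainly useful in tests. A minimal sketch, assuming the upstream buildutil package:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Two-level fake tree: import path -> file base name -> contents.
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"a/b": {"b.go": "package b\n\nconst Answer = 42\n"},
	})

	// All file access goes through the context's overridden hooks.
	fis, err := ctxt.ReadDir("/go/src/a/b")
	if err != nil {
		panic(err)
	}
	for _, fi := range fis {
		fmt.Println(fi.Name()) // b.go
	}
}
```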
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/tags.go
deleted file mode 100644
index 486606f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/tags.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package buildutil
-
-// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
-
-import "fmt"
-
-const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
- "For more information about build tags, see the description of " +
- "build constraints in the documentation for the go/build package"
-
-// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
-// a flag value in the same manner as go build's -tags flag and
-// populates a []string slice.
-//
-// See $GOROOT/src/go/build/doc.go for description of build tags.
-// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
-//
-// Example:
-// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
-type TagsFlag []string
-
-func (v *TagsFlag) Set(s string) error {
- var err error
- *v, err = splitQuotedFields(s)
- if *v == nil {
- *v = []string{}
- }
- return err
-}
-
-func (v *TagsFlag) Get() interface{} { return *v }
-
-func splitQuotedFields(s string) ([]string, error) {
- // Split fields allowing '' or "" around elements.
- // Quotes further inside the string do not count.
- var f []string
- for len(s) > 0 {
- for len(s) > 0 && isSpaceByte(s[0]) {
- s = s[1:]
- }
- if len(s) == 0 {
- break
- }
- // Accepted quoted string. No unescaping inside.
- if s[0] == '"' || s[0] == '\'' {
- quote := s[0]
- s = s[1:]
- i := 0
- for i < len(s) && s[i] != quote {
- i++
- }
- if i >= len(s) {
- return nil, fmt.Errorf("unterminated %c string", quote)
- }
- f = append(f, s[:i])
- s = s[i+1:]
- continue
- }
- i := 0
- for i < len(s) && !isSpaceByte(s[i]) {
- i++
- }
- f = append(f, s[:i])
- s = s[i:]
- }
- return f, nil
-}
-
-func (v *TagsFlag) String() string {
- return ""
-}
-
-func isSpaceByte(c byte) bool {
- return c == ' ' || c == '\t' || c == '\n' || c == '\r'
-}
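
The deleted tags.go defines TagsFlag, and its own doc comment shows the intended wiring. Reproduced here as a runnable sketch against the upstream buildutil package:

```go
package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Populate build.Default.BuildTags from a -tags flag, exactly as the
	// doc comment on TagsFlag suggests.
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse()
	fmt.Println("build tags:", build.Default.BuildTags)
}
```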
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/util.go
deleted file mode 100644
index 0e093fc..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/buildutil/util.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "runtime"
- "strings"
-)
-
-// ParseFile behaves like parser.ParseFile,
-// but uses the build context's file system interface, if any.
-//
-// If file is not absolute (as defined by IsAbsPath), the (dir, file)
-// components are joined using JoinPath; dir must be absolute.
-//
-// The displayPath function, if provided, is used to transform the
-// filename that will be attached to the ASTs.
-//
-// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
-//
-func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
- if !IsAbsPath(ctxt, file) {
- file = JoinPath(ctxt, dir, file)
- }
- rd, err := OpenFile(ctxt, file)
- if err != nil {
- return nil, err
- }
- defer rd.Close() // ignore error
- if displayPath != nil {
- file = displayPath(file)
- }
- return parser.ParseFile(fset, file, rd, mode)
-}
-
-// ContainingPackage returns the package containing filename.
-//
-// If filename is not absolute, it is interpreted relative to working directory dir.
-// All I/O is via the build context's file system interface, if any.
-//
-// The '...Files []string' fields of the resulting build.Package are not
-// populated (build.FindOnly mode).
-//
-// TODO(adonovan): call this from oracle when the tree thaws.
-//
-func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
- if !IsAbsPath(ctxt, filename) {
- filename = JoinPath(ctxt, dir, filename)
- }
-
- // We must not assume the file tree uses
- // "/" always,
- // `\` always,
- // or os.PathSeparator (which varies by platform),
- // but to make any progress, we are forced to assume that
- // paths will not use `\` unless the PathSeparator
- // is also `\`, thus we can rely on filepath.ToSlash for some sanity.
-
- dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
-
- // We assume that no source root (GOPATH[i] or GOROOT) contains any other.
- for _, srcdir := range ctxt.SrcDirs() {
- srcdirSlash := filepath.ToSlash(srcdir) + "/"
- if dirHasPrefix(dirSlash, srcdirSlash) {
- importPath := dirSlash[len(srcdirSlash) : len(dirSlash)-len("/")]
- return ctxt.Import(importPath, dir, build.FindOnly)
- }
- }
-
- return nil, fmt.Errorf("can't find package containing %s", filename)
-}
-
-// dirHasPrefix tests whether the directory dir begins with prefix.
-func dirHasPrefix(dir, prefix string) bool {
- if runtime.GOOS != "windows" {
- return strings.HasPrefix(dir, prefix)
- }
- return len(dir) >= len(prefix) && strings.EqualFold(dir[:len(prefix)], prefix)
-}
-
-// -- Effective methods of file system interface -------------------------
-
-// (go/build.Context defines these as methods, but does not export them.)
-
-// TODO(adonovan): HasSubdir?
-
-// FileExists returns true if the specified file exists,
-// using the build context's file system interface.
-func FileExists(ctxt *build.Context, path string) bool {
- if ctxt.OpenFile != nil {
- r, err := ctxt.OpenFile(path)
- if err != nil {
- return false
- }
- r.Close() // ignore error
- return true
- }
- _, err := os.Stat(path)
- return err == nil
-}
-
-// OpenFile behaves like os.Open,
-// but uses the build context's file system interface, if any.
-func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
- if ctxt.OpenFile != nil {
- return ctxt.OpenFile(path)
- }
- return os.Open(path)
-}
-
-// IsAbsPath behaves like filepath.IsAbs,
-// but uses the build context's file system interface, if any.
-func IsAbsPath(ctxt *build.Context, path string) bool {
- if ctxt.IsAbsPath != nil {
- return ctxt.IsAbsPath(path)
- }
- return filepath.IsAbs(path)
-}
-
-// JoinPath behaves like filepath.Join,
-// but uses the build context's file system interface, if any.
-func JoinPath(ctxt *build.Context, path ...string) string {
- if ctxt.JoinPath != nil {
- return ctxt.JoinPath(path...)
- }
- return filepath.Join(path...)
-}
-
-// IsDir behaves like os.Stat plus IsDir,
-// but uses the build context's file system interface, if any.
-func IsDir(ctxt *build.Context, path string) bool {
- if ctxt.IsDir != nil {
- return ctxt.IsDir(path)
- }
- fi, err := os.Stat(path)
- return err == nil && fi.IsDir()
-}
-
-// ReadDir behaves like ioutil.ReadDir,
-// but uses the build context's file system interface, if any.
-func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
- if ctxt.ReadDir != nil {
- return ctxt.ReadDir(path)
- }
- return ioutil.ReadDir(path)
-}
-
-// SplitPathList behaves like filepath.SplitList,
-// but uses the build context's file system interface, if any.
-func SplitPathList(ctxt *build.Context, s string) []string {
- if ctxt.SplitPathList != nil {
- return ctxt.SplitPathList(s)
- }
- return filepath.SplitList(s)
-}
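
Every helper in the buildutil file removed above follows the same pattern: use the hook installed on the `*build.Context` if the caller provided one, and otherwise fall back to the plain `os`/`filepath` behaviour. A minimal sketch of that pattern applied to reading a whole file; `readFile` is a hypothetical helper shown for illustration, not part of the removed API:

```go
// Sketch only: the "context hook if present, else OS default" pattern used
// throughout the deleted buildutil helpers.
package main

import (
	"fmt"
	"go/build"
	"io/ioutil"
	"log"
	"os"
)

// readFile honours a virtual file system supplied via ctxt.OpenFile, if any,
// in the same style as buildutil.OpenFile.
func readFile(ctxt *build.Context, path string) ([]byte, error) {
	if ctxt.OpenFile != nil {
		r, err := ctxt.OpenFile(path)
		if err != nil {
			return nil, err
		}
		defer r.Close()
		return ioutil.ReadAll(r)
	}
	return ioutil.ReadFile(path)
}

func main() {
	ctxt := build.Default // no hooks installed => plain OS behaviour
	data, err := readFile(&ctxt, os.Args[0])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```
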
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo.go
deleted file mode 100644
index 245b914..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.5
-
-package loader
-
-// This file handles cgo preprocessing of files containing `import "C"`.
-//
-// DESIGN
-//
-// The approach taken is to run the cgo processor on the package's
-// CgoFiles and parse the output, faking the filenames of the
-// resulting ASTs so that the synthetic file containing the C types is
-// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
-// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
-// not the names of the actual temporary files.
-//
-// The advantage of this approach is its fidelity to 'go build'. The
-// downside is that the token.Position.Offset for each AST node is
-// incorrect, being an offset within the temporary file. Line numbers
-// should still be correct because of the //line comments.
-//
-// The logic of this file is mostly plundered from the 'go build'
-// tool, which also invokes the cgo preprocessor.
-//
-//
-// REJECTED ALTERNATIVE
-//
-// An alternative approach that we explored is to extend go/types'
-// Importer mechanism to provide the identity of the importing package
-// so that each time `import "C"` appears it resolves to a different
-// synthetic package containing just the objects needed in that case.
-// The loader would invoke cgo but parse only the cgo_types.go file
-// defining the package-level objects, discarding the other files
-// resulting from preprocessing.
-//
-// The benefit of this approach would have been that source-level
-// syntax information would correspond exactly to the original cgo
-// file, with no preprocessing involved, making source tools like
-// godoc, oracle, and eg happy. However, the approach was rejected
-// due to the additional complexity it would impose on go/types. (It
-// made for a beautiful demo, though.)
-//
-// cgo files, despite their *.go extension, are not legal Go source
-// files per the specification since they may refer to unexported
-// members of package "C" such as C.int. Also, a function such as
-// C.getpwent has in effect two types, one matching its C type and one
-// which additionally returns (errno C.int). The cgo preprocessor
-// uses name mangling to distinguish these two functions in the
-// processed code, but go/types would need to duplicate this logic in
-// its handling of function calls, analogous to the treatment of map
-// lookups in which y=m[k] and y,ok=m[k] are both legal.
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "strings"
-)
-
-// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses
-// the output and returns the resulting ASTs.
-//
-func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
- tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
- if err != nil {
- return nil, err
- }
- defer os.RemoveAll(tmpdir)
-
- pkgdir := bp.Dir
- if DisplayPath != nil {
- pkgdir = DisplayPath(pkgdir)
- }
-
- cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir)
- if err != nil {
- return nil, err
- }
- var files []*ast.File
- for i := range cgoFiles {
- rd, err := os.Open(cgoFiles[i])
- if err != nil {
- return nil, err
- }
- display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
- f, err := parser.ParseFile(fset, display, rd, mode)
- rd.Close()
- if err != nil {
- return nil, err
- }
- files = append(files, f)
- }
- return files, nil
-}
-
-var cgoRe = regexp.MustCompile(`[/\\:]`)
-
-// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two
-// lists of files: the resulting processed files (in temporary
-// directory tmpdir) and the corresponding names of the unprocessed files.
-//
-// runCgo is adapted from (*builder).cgo in
-// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
-// Objective C, CGOPKGPATH, CGO_FLAGS.
-//
-func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) {
- cgoCPPFLAGS, _, _, _ := cflags(bp, true)
- _, cgoexeCFLAGS, _, _ := cflags(bp, false)
-
- if len(bp.CgoPkgConfig) > 0 {
- pcCFLAGS, err := pkgConfigFlags(bp)
- if err != nil {
- return nil, nil, err
- }
- cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
- }
-
- // Allows including _cgo_export.h from .[ch] files in the package.
- cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
-
- // _cgo_gotypes.go (displayed "C") contains the type definitions.
- files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
- displayFiles = append(displayFiles, "C")
- for _, fn := range bp.CgoFiles {
- // "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
- f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
- files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
- displayFiles = append(displayFiles, fn)
- }
-
- var cgoflags []string
- if bp.Goroot && bp.ImportPath == "runtime/cgo" {
- cgoflags = append(cgoflags, "-import_runtime_cgo=false")
- }
- if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
- cgoflags = append(cgoflags, "-import_syscall=false")
- }
-
- args := stringList(
- "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
- cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles,
- )
- if false {
- log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
- }
- cmd := exec.Command(args[0], args[1:]...)
- cmd.Dir = pkgdir
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
- }
-
- return files, displayFiles, nil
-}
-
-// -- unmodified from 'go build' ---------------------------------------
-
-// Return the flags to use when invoking the C or C++ compilers, or cgo.
-func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
- var defaults string
- if def {
- defaults = "-g -O2"
- }
-
- cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
- cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
- cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
- ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
- return
-}
-
-// envList returns the value of the given environment variable broken
-// into fields, using the default value when the variable is empty.
-func envList(key, def string) []string {
- v := os.Getenv(key)
- if v == "" {
- v = def
- }
- return strings.Fields(v)
-}
-
-// stringList's arguments should be a sequence of string or []string values.
-// stringList flattens them into a single []string.
-func stringList(args ...interface{}) []string {
- var x []string
- for _, arg := range args {
- switch arg := arg.(type) {
- case []string:
- x = append(x, arg...)
- case string:
- x = append(x, arg)
- default:
- panic("stringList: invalid argument")
- }
- }
- return x
-}
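
The `envList` and `stringList` helpers at the end of the removed cgo file are general-purpose: one reads an environment variable into fields with a default, the other flattens a mix of `string` and `[]string` arguments into a single command line. A small self-contained sketch showing how they combine (the sample `main` is an illustrative addition):

```go
// Sketch only: the envList/stringList helpers from the removed file, plus a
// usage example that assembles a cgo-style command line.
package main

import (
	"fmt"
	"os"
	"strings"
)

// envList splits an environment variable into fields, falling back to a
// default when it is unset or empty.
func envList(key, def string) []string {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return strings.Fields(v)
}

// stringList flattens a sequence of string and []string values.
func stringList(args ...interface{}) []string {
	var x []string
	for _, arg := range args {
		switch arg := arg.(type) {
		case []string:
			x = append(x, arg...)
		case string:
			x = append(x, arg)
		default:
			panic("stringList: invalid argument")
		}
	}
	return x
}

func main() {
	cflags := envList("CGO_CFLAGS", "-g -O2")
	args := stringList("go", "tool", "cgo", "--", cflags, "main.go")
	fmt.Println(args)
}
```
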
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go
deleted file mode 100644
index de57422..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-import (
- "errors"
- "fmt"
- "go/build"
- "os/exec"
- "strings"
-)
-
-// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
-func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
- cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
- out, err := cmd.CombinedOutput()
- if err != nil {
- s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
- if len(out) > 0 {
- s = fmt.Sprintf("%s: %s", s, out)
- }
- return nil, errors.New(s)
- }
- if len(out) > 0 {
- flags = strings.Fields(string(out))
- }
- return
-}
-
-// pkgConfigFlags calls pkg-config if needed and returns the cflags
-// needed to build the package.
-func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
- if len(p.CgoPkgConfig) == 0 {
- return nil, nil
- }
- return pkgConfig("--cflags", p.CgoPkgConfig)
-}
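
The removed `cgo_pkgconfig.go` obtains C compiler flags by running `pkg-config` and splitting its stdout into fields. A condensed sketch of that call, assuming `pkg-config` and a `zlib.pc` file happen to be installed on the machine running it:

```go
// Sketch only: fetch --cflags for a set of packages via pkg-config.
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func pkgConfigCflags(pkgs ...string) ([]string, error) {
	out, err := exec.Command("pkg-config", append([]string{"--cflags"}, pkgs...)...).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("pkg-config failed: %v: %s", err, out)
	}
	return strings.Fields(string(out)), nil
}

func main() {
	flags, err := pkgConfigCflags("zlib") // "zlib" is only an example package
	if err != nil {
		log.Fatal(err) // pkg-config or zlib.pc may be missing on this machine
	}
	fmt.Println(flags)
}
```
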
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/doc.go
deleted file mode 100644
index 9b51c9e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/doc.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package loader loads a complete Go program from source code, parsing
-// and type-checking the initial packages plus their transitive closure
-// of dependencies. The ASTs and the derived facts are retained for
-// later use.
-//
-// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
-//
-// The package defines two primary types: Config, which specifies a
-// set of initial packages to load and various other options; and
-// Program, which is the result of successfully loading the packages
-// specified by a configuration.
-//
-// The configuration can be set directly, but *Config provides various
-// convenience methods to simplify the common cases, each of which can
-// be called any number of times. Finally, these are followed by a
-// call to Load() to actually load and type-check the program.
-//
-// var conf loader.Config
-//
-// // Use the command-line arguments to specify
-// // a set of initial packages to load from source.
-// // See FromArgsUsage for help.
-// rest, err := conf.FromArgs(os.Args[1:], wantTests)
-//
-// // Parse the specified files and create an ad hoc package with path "foo".
-// // All files must have the same 'package' declaration.
-// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
-//
-// // Create an ad hoc package with path "foo" from
-// // the specified already-parsed files.
-// // All ASTs must have the same 'package' declaration.
-// conf.CreateFromFiles("foo", parsedFiles)
-//
-// // Add "runtime" to the set of packages to be loaded.
-// conf.Import("runtime")
-//
-// // Adds "fmt" and "fmt_test" to the set of packages
-// // to be loaded. "fmt" will include *_test.go files.
-// conf.ImportWithTests("fmt")
-//
-// // Finally, load all the packages specified by the configuration.
-// prog, err := conf.Load()
-//
-// See examples_test.go for examples of API usage.
-//
-//
-// CONCEPTS AND TERMINOLOGY
-//
-// The WORKSPACE is the set of packages accessible to the loader. The
-// workspace is defined by Config.Build, a *build.Context. The
-// default context treats subdirectories of $GOROOT and $GOPATH as
-// packages, but this behavior may be overridden.
-//
-// An AD HOC package is one specified as a set of source files on the
-// command line. In the simplest case, it may consist of a single file
-// such as $GOROOT/src/net/http/triv.go.
-//
-// EXTERNAL TEST packages are those comprised of a set of *_test.go
-// files all with the same 'package foo_test' declaration, all in the
-// same directory. (go/build.Package calls these files XTestFiles.)
-//
-// An IMPORTABLE package is one that can be referred to by some import
-// spec. Every importable package is uniquely identified by its
-// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
-// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
-// typically denotes a subdirectory of the workspace.
-//
-// An import declaration uses an IMPORT PATH to refer to a package.
-// Most import declarations use the package path as the import path.
-//
-// Due to VENDORING (https://golang.org/s/go15vendor), the
-// interpretation of an import path may depend on the directory in which
-// it appears. To resolve an import path to a package path, go/build
-// must search the enclosing directories for a subdirectory named
-// "vendor".
-//
-// ad hoc packages and external test packages are NON-IMPORTABLE. The
-// path of an ad hoc package is inferred from the package
-// declarations of its files and is therefore not a unique package key.
-// For example, Config.CreatePkgs may specify two initial ad hoc
-// packages, both with path "main".
-//
-// An AUGMENTED package is an importable package P plus all the
-// *_test.go files with same 'package foo' declaration as P.
-// (go/build.Package calls these files TestFiles.)
-//
-// The INITIAL packages are those specified in the configuration. A
-// DEPENDENCY is a package loaded to satisfy an import in an initial
-// package or another dependency.
-//
-package loader
-
-// IMPLEMENTATION NOTES
-//
-// 'go test', in-package test files, and import cycles
-// ---------------------------------------------------
-//
-// An external test package may depend upon members of the augmented
-// package that are not in the unaugmented package, such as functions
-// that expose internals. (See bufio/export_test.go for an example.)
-// So, the loader must ensure that for each external test package
-// it loads, it also augments the corresponding non-test package.
-//
-// The import graph over n unaugmented packages must be acyclic; the
-// import graph over n-1 unaugmented packages plus one augmented
-// package must also be acyclic. ('go test' relies on this.) But the
-// import graph over n augmented packages may contain cycles.
-//
-// First, all the (unaugmented) non-test packages and their
-// dependencies are imported in the usual way; the loader reports an
-// error if it detects an import cycle.
-//
-// Then, each package P for which testing is desired is augmented by
-// the list P' of its in-package test files, by calling
-// (*types.Checker).Files. This arrangement ensures that P' may
-// reference definitions within P, but P may not reference definitions
-// within P'. Furthermore, P' may import any other package, including
-// ones that depend upon P, without an import cycle error.
-//
-// Consider two packages A and B, both of which have lists of
-// in-package test files we'll call A' and B', and which have the
-// following import graph edges:
-// B imports A
-// B' imports A
-// A' imports B
-// This last edge would be expected to create an error were it not
-// for the special type-checking discipline above.
-// Cycles of size greater than two are possible. For example:
-// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
-// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
-// regexp/exec_test.go (package regexp) imports "compress/bzip2"
-//
-//
-// Concurrency
-// -----------
-//
-// Let us define the import dependency graph as follows. Each node is a
-// list of files passed to (Checker).Files at once. Many of these lists
-// are the production code of an importable Go package, so those nodes
-// are labelled by the package's path. The remaining nodes are
-// ad hoc packages and lists of in-package *_test.go files that augment
-// an importable package; those nodes have no label.
-//
-// The edges of the graph represent import statements appearing within a
-// file. An edge connects a node (a list of files) to the node it
-// imports, which is importable and thus always labelled.
-//
-// Loading is controlled by this dependency graph.
-//
-// To reduce I/O latency, we start loading a package's dependencies
-// asynchronously as soon as we've parsed its files and enumerated its
-// imports (scanImports). This performs a preorder traversal of the
-// import dependency graph.
-//
-// To exploit hardware parallelism, we type-check unrelated packages in
-// parallel, where "unrelated" means not ordered by the partial order of
-// the import dependency graph.
-//
-// We use a concurrency-safe non-blocking cache (importer.imported) to
-// record the results of type-checking, whether success or failure. An
-// entry is created in this cache by startLoad the first time the
-// package is imported. The first goroutine to request an entry becomes
-// responsible for completing the task and broadcasting completion to
-// subsequent requestors, which block until then.
-//
-// Type checking occurs in (parallel) postorder: we cannot type-check a
-// set of files until we have loaded and type-checked all of their
-// immediate dependencies (and thus all of their transitive
-// dependencies). If the input were guaranteed free of import cycles,
-// this would be trivial: we could simply wait for completion of the
-// dependencies and then invoke the typechecker.
-//
-// But as we saw in the 'go test' section above, some cycles in the
-// import graph over packages are actually legal, so long as the
-// cycle-forming edge originates in the in-package test files that
-// augment the package. This explains why the nodes of the import
-// dependency graph are not packages, but lists of files: the unlabelled
-// nodes avoid the cycles. Consider packages A and B where B imports A
-// and A's in-package tests AT import B. The naively constructed import
-// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
-// the graph over lists of files is AT --> B --> A, where AT is an
-// unlabelled node.
-//
-// Awaiting completion of the dependencies in a cyclic graph would
-// deadlock, so we must materialize the import dependency graph (as
-// importer.graph) and check whether each import edge forms a cycle. If
-// x imports y, and the graph already contains a path from y to x, then
-// there is an import cycle, in which case the processing of x must not
-// wait for the completion of processing of y.
-//
-// When the type-checker makes a callback (doImport) to the loader for a
-// given import edge, there are two possible cases. In the normal case,
-// the dependency has already been completely type-checked; doImport
-// does a cache lookup and returns it. In the cyclic case, the entry in
-// the cache is still necessarily incomplete, indicating a cycle. We
-// perform the cycle check again to obtain the error message, and return
-// the error.
-//
-// The result of using concurrency is about a 2.5x speedup for stdlib_test.
-
-// TODO(adonovan): overhaul the package documentation.
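
The package documentation removed above describes the intended workflow: build a `loader.Config`, add initial packages, call `Load`, then inspect the resulting `Program`. A minimal runnable version of that workflow is sketched below (note that `golang.org/x/tools/go/loader` has since been superseded by `golang.org/x/tools/go/packages` for new code):

```go
// Sketch only: the basic Config -> Load -> Program workflow described in the
// removed package documentation.
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config
	conf.Import("fmt") // add an initial package to load from source

	prog, err := conf.Load() // parse and type-check it plus its dependencies
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range prog.InitialPackages() {
		fmt.Printf("%s: %d files\n", info.Pkg.Path(), len(info.Files))
	}
}
```
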
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/go16.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/go16.go
deleted file mode 100644
index c0ed50f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/go16.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.6
-
-package loader
-
-import "go/build"
-
-func init() {
- ignoreVendor = build.IgnoreVendor
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/loader.go
deleted file mode 100644
index f0171fc..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/loader.go
+++ /dev/null
@@ -1,1059 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.5
-
-package loader
-
-// See doc.go for package documentation and implementation notes.
-
-import (
- "errors"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "go/types"
- "os"
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/go/ast/astutil"
-)
-
-var ignoreVendor build.ImportMode
-
-const trace = false // show timing info for type-checking
-
-// Config specifies the configuration for loading a whole program from
-// Go source code.
-// The zero value for Config is a ready-to-use default configuration.
-type Config struct {
- // Fset is the file set for the parser to use when loading the
- // program. If nil, it may be lazily initialized by any
- // method of Config.
- Fset *token.FileSet
-
- // ParserMode specifies the mode to be used by the parser when
- // loading source packages.
- ParserMode parser.Mode
-
- // TypeChecker contains options relating to the type checker.
- //
- // The supplied IgnoreFuncBodies is not used; the effective
- // value comes from the TypeCheckFuncBodies func below.
- // The supplied Import function is not used either.
- TypeChecker types.Config
-
- // TypeCheckFuncBodies is a predicate over package paths.
- // A package for which the predicate is false will
- // have its package-level declarations type checked, but not
- // its function bodies; this can be used to quickly load
- // dependencies from source. If nil, all func bodies are type
- // checked.
- TypeCheckFuncBodies func(path string) bool
-
- // If Build is non-nil, it is used to locate source packages.
- // Otherwise &build.Default is used.
- //
- // By default, cgo is invoked to preprocess Go files that
- // import the fake package "C". This behaviour can be
- // disabled by setting CGO_ENABLED=0 in the environment prior
- // to startup, or by setting Build.CgoEnabled=false.
- Build *build.Context
-
- // The current directory, used for resolving relative package
- // references such as "./go/loader". If empty, os.Getwd will be
- // used instead.
- Cwd string
-
- // If DisplayPath is non-nil, it is used to transform each
- // file name obtained from Build.Import(). This can be used
- // to prevent a virtualized build.Config's file names from
- // leaking into the user interface.
- DisplayPath func(path string) string
-
- // If AllowErrors is true, Load will return a Program even
- // if some of the its packages contained I/O, parser or type
- // errors; such errors are accessible via PackageInfo.Errors. If
- // false, Load will fail if any package had an error.
- AllowErrors bool
-
- // CreatePkgs specifies a list of non-importable initial
- // packages to create. The resulting packages will appear in
- // the corresponding elements of the Program.Created slice.
- CreatePkgs []PkgSpec
-
- // ImportPkgs specifies a set of initial packages to load.
- // The map keys are package paths.
- //
- // The map value indicates whether to load tests. If true, Load
- // will add and type-check two lists of files to the package:
- // non-test files followed by in-package *_test.go files. In
- // addition, it will append the external test package (if any)
- // to Program.Created.
- ImportPkgs map[string]bool
-
- // FindPackage is called during Load to create the build.Package
- // for a given import path from a given directory.
- // If FindPackage is nil, (*build.Context).Import is used.
- // A client may use this hook to adapt to a proprietary build
- // system that does not follow the "go build" layout
- // conventions, for example.
- //
- // It must be safe to call concurrently from multiple goroutines.
- FindPackage func(ctxt *build.Context, fromDir, importPath string, mode build.ImportMode) (*build.Package, error)
-
- // AfterTypeCheck is called immediately after a list of files
- // has been type-checked and appended to info.Files.
- //
- // This optional hook function is the earliest opportunity for
- // the client to observe the output of the type checker,
- // which may be useful to reduce analysis latency when loading
- // a large program.
- //
- // The function is permitted to modify info.Info, for instance
- // to clear data structures that are no longer needed, which can
- // dramatically reduce peak memory consumption.
- //
- // The function may be called twice for the same PackageInfo:
- // once for the files of the package and again for the
- // in-package test files.
- //
- // It must be safe to call concurrently from multiple goroutines.
- AfterTypeCheck func(info *PackageInfo, files []*ast.File)
-}
-
-// A PkgSpec specifies a non-importable package to be created by Load.
-// Files are processed first, but typically only one of Files and
-// Filenames is provided. The path needn't be globally unique.
-//
-type PkgSpec struct {
- Path string // package path ("" => use package declaration)
- Files []*ast.File // ASTs of already-parsed files
- Filenames []string // names of files to be parsed
-}
-
-// A Program is a Go program loaded from source as specified by a Config.
-type Program struct {
- Fset *token.FileSet // the file set for this program
-
- // Created[i] contains the initial package whose ASTs or
- // filenames were supplied by Config.CreatePkgs[i], followed by
- // the external test package, if any, of each package in
- // Config.ImportPkgs ordered by ImportPath.
- //
- // NOTE: these files must not import "C". Cgo preprocessing is
- // only performed on imported packages, not ad hoc packages.
- //
- // TODO(adonovan): we need to copy and adapt the logic of
- // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make
- // Config.Import and Config.Create methods return the same kind
- // of entity, essentially a build.Package.
- // Perhaps we can even reuse that type directly.
- Created []*PackageInfo
-
- // Imported contains the initially imported packages,
- // as specified by Config.ImportPkgs.
- Imported map[string]*PackageInfo
-
- // AllPackages contains the PackageInfo of every package
- // encountered by Load: all initial packages and all
- // dependencies, including incomplete ones.
- AllPackages map[*types.Package]*PackageInfo
-
- // importMap is the canonical mapping of package paths to
- // packages. It contains all Imported initial packages, but not
- // Created ones, and all imported dependencies.
- importMap map[string]*types.Package
-}
-
-// PackageInfo holds the ASTs and facts derived by the type-checker
-// for a single package.
-//
-// Not mutated once exposed via the API.
-//
-type PackageInfo struct {
- Pkg *types.Package
- Importable bool // true if 'import "Pkg.Path()"' would resolve to this
- TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors
- Files []*ast.File // syntax trees for the package's files
- Errors []error // non-nil if the package had errors
- types.Info // type-checker deductions.
- dir string // package directory
-
- checker *types.Checker // transient type-checker state
- errorFunc func(error)
-}
-
-func (info *PackageInfo) String() string { return info.Pkg.Path() }
-
-func (info *PackageInfo) appendError(err error) {
- if info.errorFunc != nil {
- info.errorFunc(err)
- } else {
- fmt.Fprintln(os.Stderr, err)
- }
- info.Errors = append(info.Errors, err)
-}
-
-func (conf *Config) fset() *token.FileSet {
- if conf.Fset == nil {
- conf.Fset = token.NewFileSet()
- }
- return conf.Fset
-}
-
-// ParseFile is a convenience function (intended for testing) that invokes
-// the parser using the Config's FileSet, which is initialized if nil.
-//
-// src specifies the parser input as a string, []byte, or io.Reader, and
-// filename is its apparent name. If src is nil, the contents of
-// filename are read from the file system.
-//
-func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
- // TODO(adonovan): use conf.build() etc like parseFiles does.
- return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
-}
-
-// FromArgsUsage is a partial usage message that applications calling
-// FromArgs may wish to include in their -help output.
-const FromArgsUsage = `
- is a list of arguments denoting a set of initial packages.
-It may take one of two forms:
-
-1. A list of *.go source files.
-
- All of the specified files are loaded, parsed and type-checked
- as a single package. All the files must belong to the same directory.
-
-2. A list of import paths, each denoting a package.
-
- The package's directory is found relative to the $GOROOT and
- $GOPATH using similar logic to 'go build', and the *.go files in
- that directory are loaded, parsed and type-checked as a single
- package.
-
- In addition, all *_test.go files in the directory are then loaded
- and parsed. Those files whose package declaration equals that of
- the non-*_test.go files are included in the primary package. Test
- files whose package declaration ends with "_test" are type-checked
- as another package, the 'external' test package, so that a single
- import path may denote two packages. (Whether this behaviour is
- enabled is tool-specific, and may depend on additional flags.)
-
-A '--' argument terminates the list of packages.
-`
-
-// FromArgs interprets args as a set of initial packages to load from
-// source and updates the configuration. It returns the list of
-// unconsumed arguments.
-//
-// It is intended for use in command-line interfaces that require a
-// set of initial packages to be specified; see FromArgsUsage message
-// for details.
-//
-// Only superficial errors are reported at this stage; errors dependent
-// on I/O are detected during Load.
-//
-func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
- var rest []string
- for i, arg := range args {
- if arg == "--" {
- rest = args[i+1:]
- args = args[:i]
- break // consume "--" and return the remaining args
- }
- }
-
- if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
- // Assume args is a list of a *.go files
- // denoting a single ad hoc package.
- for _, arg := range args {
- if !strings.HasSuffix(arg, ".go") {
- return nil, fmt.Errorf("named files must be .go files: %s", arg)
- }
- }
- conf.CreateFromFilenames("", args...)
- } else {
- // Assume args are directories each denoting a
- // package and (perhaps) an external test, iff xtest.
- for _, arg := range args {
- if xtest {
- conf.ImportWithTests(arg)
- } else {
- conf.Import(arg)
- }
- }
- }
-
- return rest, nil
-}
-
-// CreateFromFilenames is a convenience function that adds
-// a conf.CreatePkgs entry to create a package of the specified *.go
-// files.
-//
-func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
- conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
-}
-
-// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
-// entry to create package of the specified path and parsed files.
-//
-func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
- conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
-}
-
-// ImportWithTests is a convenience function that adds path to
-// ImportPkgs, the set of initial source packages located relative to
-// $GOPATH. The package will be augmented by any *_test.go files in
-// its directory that contain a "package x" (not "package x_test")
-// declaration.
-//
-// In addition, if any *_test.go files contain a "package x_test"
-// declaration, an additional package comprising just those files will
-// be added to CreatePkgs.
-//
-func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
-
-// Import is a convenience function that adds path to ImportPkgs, the
-// set of initial packages that will be imported from source.
-//
-func (conf *Config) Import(path string) { conf.addImport(path, false) }
-
-func (conf *Config) addImport(path string, tests bool) {
- if path == "C" {
- return // ignore; not a real package
- }
- if conf.ImportPkgs == nil {
- conf.ImportPkgs = make(map[string]bool)
- }
- conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
-}
-
-// PathEnclosingInterval returns the PackageInfo and ast.Node that
-// contain source interval [start, end), and all the node's ancestors
-// up to the AST root. It searches all ast.Files of all packages in prog.
-// exact is defined as for astutil.PathEnclosingInterval.
-//
-// The zero value is returned if not found.
-//
-func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
- for _, info := range prog.AllPackages {
- for _, f := range info.Files {
- if f.Pos() == token.NoPos {
- // This can happen if the parser saw
- // too many errors and bailed out.
- // (Use parser.AllErrors to prevent that.)
- continue
- }
- if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
- continue
- }
- if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
- return info, path, exact
- }
- }
- }
- return nil, nil, false
-}
-
-// InitialPackages returns a new slice containing the set of initial
-// packages (Created + Imported) in unspecified order.
-//
-func (prog *Program) InitialPackages() []*PackageInfo {
- infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
- infos = append(infos, prog.Created...)
- for _, info := range prog.Imported {
- infos = append(infos, info)
- }
- return infos
-}
-
-// Package returns the ASTs and results of type checking for the
-// specified package.
-func (prog *Program) Package(path string) *PackageInfo {
- if info, ok := prog.AllPackages[prog.importMap[path]]; ok {
- return info
- }
- for _, info := range prog.Created {
- if path == info.Pkg.Path() {
- return info
- }
- }
- return nil
-}
-
-// ---------- Implementation ----------
-
-// importer holds the working state of the algorithm.
-type importer struct {
- conf *Config // the client configuration
- start time.Time // for logging
-
- progMu sync.Mutex // guards prog
- prog *Program // the resulting program
-
- // findpkg is a memoization of FindPackage.
- findpkgMu sync.Mutex // guards findpkg
- findpkg map[findpkgKey]*findpkgValue
-
- importedMu sync.Mutex // guards imported
- imported map[string]*importInfo // all imported packages (incl. failures) by import path
-
- // import dependency graph: graph[x][y] => x imports y
- //
- // Since non-importable packages cannot be cyclic, we ignore
- // their imports, thus we only need the subgraph over importable
- // packages. Nodes are identified by their import paths.
- graphMu sync.Mutex
- graph map[string]map[string]bool
-}
-
-type findpkgKey struct {
- importPath string
- fromDir string
- mode build.ImportMode
-}
-
-type findpkgValue struct {
- ready chan struct{} // closed to broadcast readiness
- bp *build.Package
- err error
-}
-
-// importInfo tracks the success or failure of a single import.
-//
-// Upon completion, exactly one of info and err is non-nil:
-// info on successful creation of a package, err otherwise.
-// A successful package may still contain type errors.
-//
-type importInfo struct {
- path string // import path
- info *PackageInfo // results of typechecking (including errors)
- complete chan struct{} // closed to broadcast that info is set.
-}
-
-// awaitCompletion blocks until ii is complete,
-// i.e. the info field is safe to inspect.
-func (ii *importInfo) awaitCompletion() {
- <-ii.complete // wait for close
-}
-
-// Complete marks ii as complete.
-// Its info and err fields will not be subsequently updated.
-func (ii *importInfo) Complete(info *PackageInfo) {
- if info == nil {
- panic("info == nil")
- }
- ii.info = info
- close(ii.complete)
-}
-
-type importError struct {
- path string // import path
- err error // reason for failure to create a package
-}
-
-// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
-// loading their dependencies packages as needed.
-//
-// On success, Load returns a Program containing a PackageInfo for
-// each package. On failure, it returns an error.
-//
-// If AllowErrors is true, Load will return a Program even if some
-// packages contained I/O, parser or type errors, or if dependencies
-// were missing. (Such errors are accessible via PackageInfo.Errors. If
-// false, Load will fail if any package had an error.
-//
-// It is an error if no packages were loaded.
-//
-func (conf *Config) Load() (*Program, error) {
- // Create a simple default error handler for parse/type errors.
- if conf.TypeChecker.Error == nil {
- conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
- }
-
- // Set default working directory for relative package references.
- if conf.Cwd == "" {
- var err error
- conf.Cwd, err = os.Getwd()
- if err != nil {
- return nil, err
- }
- }
-
- // Install default FindPackage hook using go/build logic.
- if conf.FindPackage == nil {
- conf.FindPackage = (*build.Context).Import
- }
-
- prog := &Program{
- Fset: conf.fset(),
- Imported: make(map[string]*PackageInfo),
- importMap: make(map[string]*types.Package),
- AllPackages: make(map[*types.Package]*PackageInfo),
- }
-
- imp := importer{
- conf: conf,
- prog: prog,
- findpkg: make(map[findpkgKey]*findpkgValue),
- imported: make(map[string]*importInfo),
- start: time.Now(),
- graph: make(map[string]map[string]bool),
- }
-
- // -- loading proper (concurrent phase) --------------------------------
-
- var errpkgs []string // packages that contained errors
-
- // Load the initially imported packages and their dependencies,
- // in parallel.
- // No vendor check on packages imported from the command line.
- infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
- for _, ie := range importErrors {
- conf.TypeChecker.Error(ie.err) // failed to create package
- errpkgs = append(errpkgs, ie.path)
- }
- for _, info := range infos {
- prog.Imported[info.Pkg.Path()] = info
- }
-
- // Augment the designated initial packages by their tests.
- // Dependencies are loaded in parallel.
- var xtestPkgs []*build.Package
- for importPath, augment := range conf.ImportPkgs {
- if !augment {
- continue
- }
-
- // No vendor check on packages imported from command line.
- bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
- if err != nil {
- // Package not found, or can't even parse package declaration.
- // Already reported by previous loop; ignore it.
- continue
- }
-
- // Needs external test package?
- if len(bp.XTestGoFiles) > 0 {
- xtestPkgs = append(xtestPkgs, bp)
- }
-
- // Consult the cache using the canonical package path.
- path := bp.ImportPath
- imp.importedMu.Lock() // (unnecessary, we're sequential here)
- ii, ok := imp.imported[path]
- // Paranoid checks added due to issue #11012.
- if !ok {
- // Unreachable.
- // The previous loop called importAll and thus
- // startLoad for each path in ImportPkgs, which
- // populates imp.imported[path] with a non-zero value.
- panic(fmt.Sprintf("imported[%q] not found", path))
- }
- if ii == nil {
- // Unreachable.
- // The ii values in this loop are the same as in
- // the previous loop, which enforced the invariant
- // that at least one of ii.err and ii.info is non-nil.
- panic(fmt.Sprintf("imported[%q] == nil", path))
- }
- if ii.info == nil {
- // Unreachable.
- // awaitCompletion has the postcondition
- // ii.info != nil.
- panic(fmt.Sprintf("imported[%q].info = nil", path))
- }
- info := ii.info
- imp.importedMu.Unlock()
-
- // Parse the in-package test files.
- files, errs := imp.conf.parsePackageFiles(bp, 't')
- for _, err := range errs {
- info.appendError(err)
- }
-
- // The test files augmenting package P cannot be imported,
- // but may import packages that import P,
- // so we must disable the cycle check.
- imp.addFiles(info, files, false)
- }
-
- createPkg := func(path string, files []*ast.File, errs []error) {
- // TODO(adonovan): fix: use dirname of files, not cwd.
- info := imp.newPackageInfo(path, conf.Cwd)
- for _, err := range errs {
- info.appendError(err)
- }
-
- // Ad hoc packages are non-importable,
- // so no cycle check is needed.
- // addFiles loads dependencies in parallel.
- imp.addFiles(info, files, false)
- prog.Created = append(prog.Created, info)
- }
-
- // Create packages specified by conf.CreatePkgs.
- for _, cp := range conf.CreatePkgs {
- files, errs := parseFiles(conf.fset(), conf.build(), nil, ".", cp.Filenames, conf.ParserMode)
- files = append(files, cp.Files...)
-
- path := cp.Path
- if path == "" {
- if len(files) > 0 {
- path = files[0].Name.Name
- } else {
- path = "(unnamed)"
- }
- }
- createPkg(path, files, errs)
- }
-
- // Create external test packages.
- sort.Sort(byImportPath(xtestPkgs))
- for _, bp := range xtestPkgs {
- files, errs := imp.conf.parsePackageFiles(bp, 'x')
- createPkg(bp.ImportPath+"_test", files, errs)
- }
-
- // -- finishing up (sequential) ----------------------------------------
-
- if len(prog.Imported)+len(prog.Created) == 0 {
- return nil, errors.New("no initial packages were loaded")
- }
-
- // Create infos for indirectly imported packages.
- // e.g. incomplete packages without syntax, loaded from export data.
- for _, obj := range prog.importMap {
- info := prog.AllPackages[obj]
- if info == nil {
- prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
- } else {
- // finished
- info.checker = nil
- info.errorFunc = nil
- }
- }
-
- if !conf.AllowErrors {
- // Report errors in indirectly imported packages.
- for _, info := range prog.AllPackages {
- if len(info.Errors) > 0 {
- errpkgs = append(errpkgs, info.Pkg.Path())
- }
- }
- if errpkgs != nil {
- var more string
- if len(errpkgs) > 3 {
- more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
- errpkgs = errpkgs[:3]
- }
- return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
- strings.Join(errpkgs, ", "), more)
- }
- }
-
- markErrorFreePackages(prog.AllPackages)
-
- return prog, nil
-}
-
-type byImportPath []*build.Package
-
-func (b byImportPath) Len() int { return len(b) }
-func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
-func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-
-// markErrorFreePackages sets the TransitivelyErrorFree flag on all
-// applicable packages.
-func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
- // Build the transpose of the import graph.
- importedBy := make(map[*types.Package]map[*types.Package]bool)
- for P := range allPackages {
- for _, Q := range P.Imports() {
- clients, ok := importedBy[Q]
- if !ok {
- clients = make(map[*types.Package]bool)
- importedBy[Q] = clients
- }
- clients[P] = true
- }
- }
-
- // Find all packages reachable from some error package.
- reachable := make(map[*types.Package]bool)
- var visit func(*types.Package)
- visit = func(p *types.Package) {
- if !reachable[p] {
- reachable[p] = true
- for q := range importedBy[p] {
- visit(q)
- }
- }
- }
- for _, info := range allPackages {
- if len(info.Errors) > 0 {
- visit(info.Pkg)
- }
- }
-
- // Mark the others as "transitively error-free".
- for _, info := range allPackages {
- if !reachable[info.Pkg] {
- info.TransitivelyErrorFree = true
- }
- }
-}
-
-// build returns the effective build context.
-func (conf *Config) build() *build.Context {
- if conf.Build != nil {
- return conf.Build
- }
- return &build.Default
-}
-
-// parsePackageFiles enumerates the files belonging to package path,
-// then loads, parses and returns them, plus a list of I/O or parse
-// errors that were encountered.
-//
-// 'which' indicates which files to include:
-// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
-// 't': include in-package *_test.go source files (TestGoFiles)
-// 'x': include external *_test.go source files. (XTestGoFiles)
-//
-func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
- if bp.ImportPath == "unsafe" {
- return nil, nil
- }
- var filenames []string
- switch which {
- case 'g':
- filenames = bp.GoFiles
- case 't':
- filenames = bp.TestGoFiles
- case 'x':
- filenames = bp.XTestGoFiles
- default:
- panic(which)
- }
-
- files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
-
- // Preprocess CgoFiles and parse the outputs (sequentially).
- if which == 'g' && bp.CgoFiles != nil {
- cgofiles, err := processCgoFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
- if err != nil {
- errs = append(errs, err)
- } else {
- files = append(files, cgofiles...)
- }
- }
-
- return files, errs
-}
-
-// doImport imports the package denoted by path.
-// It implements the types.Importer signature.
-//
-// It returns an error if a package could not be created
-// (e.g. go/build or parse error), but type errors are reported via
-// the types.Config.Error callback (the first of which is also saved
-// in the package's PackageInfo).
-//
-// Idempotent.
-//
-func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
- if to == "C" {
- // This should be unreachable, but ad hoc packages are
- // not currently subject to cgo preprocessing.
- // See https://github.com/golang/go/issues/11627.
- return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
- from.Pkg.Path())
- }
-
- bp, err := imp.findPackage(to, from.dir, 0)
- if err != nil {
- return nil, err
- }
-
- // The standard unsafe package is handled specially,
- // and has no PackageInfo.
- if bp.ImportPath == "unsafe" {
- return types.Unsafe, nil
- }
-
- // Look for the package in the cache using its canonical path.
- path := bp.ImportPath
- imp.importedMu.Lock()
- ii := imp.imported[path]
- imp.importedMu.Unlock()
- if ii == nil {
- panic("internal error: unexpected import: " + path)
- }
- if ii.info != nil {
- return ii.info.Pkg, nil
- }
-
- // Import of incomplete package: this indicates a cycle.
- fromPath := from.Pkg.Path()
- if cycle := imp.findPath(path, fromPath); cycle != nil {
- cycle = append([]string{fromPath}, cycle...)
- return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
- }
-
- panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
-}
-
-// findPackage locates the package denoted by the importPath in the
-// specified directory.
-func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
- // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7)
- // to avoid holding the lock around FindPackage.
- key := findpkgKey{importPath, fromDir, mode}
- imp.findpkgMu.Lock()
- v, ok := imp.findpkg[key]
- if ok {
- // cache hit
- imp.findpkgMu.Unlock()
-
- <-v.ready // wait for entry to become ready
- } else {
- // Cache miss: this goroutine becomes responsible for
- // populating the map entry and broadcasting its readiness.
- v = &findpkgValue{ready: make(chan struct{})}
- imp.findpkg[key] = v
- imp.findpkgMu.Unlock()
-
- ioLimit <- true
- v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode)
- <-ioLimit
-
- if _, ok := v.err.(*build.NoGoError); ok {
- v.err = nil // empty directory is not an error
- }
-
- close(v.ready) // broadcast ready condition
- }
- return v.bp, v.err
-}
-
-// importAll loads, parses, and type-checks the specified packages in
-// parallel and returns their completed importInfos in unspecified order.
-//
-// fromPath is the package path of the importing package, if it is
-// importable, "" otherwise. It is used for cycle detection.
-//
-// fromDir is the directory containing the import declaration that
-// caused these imports.
-//
-func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
- // TODO(adonovan): opt: do the loop in parallel once
- // findPackage is non-blocking.
- var pending []*importInfo
- for importPath := range imports {
- bp, err := imp.findPackage(importPath, fromDir, mode)
- if err != nil {
- errors = append(errors, importError{
- path: importPath,
- err: err,
- })
- continue
- }
- pending = append(pending, imp.startLoad(bp))
- }
-
- if fromPath != "" {
- // We're loading a set of imports.
- //
- // We must record graph edges from the importing package
- // to its dependencies, and check for cycles.
- imp.graphMu.Lock()
- deps, ok := imp.graph[fromPath]
- if !ok {
- deps = make(map[string]bool)
- imp.graph[fromPath] = deps
- }
- for _, ii := range pending {
- deps[ii.path] = true
- }
- imp.graphMu.Unlock()
- }
-
- for _, ii := range pending {
- if fromPath != "" {
- if cycle := imp.findPath(ii.path, fromPath); cycle != nil {
- // Cycle-forming import: we must not await its
- // completion since it would deadlock.
- //
- // We don't record the error in ii since
- // the error is really associated with the
- // cycle-forming edge, not the package itself.
- // (Also it would complicate the
- // invariants of importPath completion.)
- if trace {
- fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
- }
- continue
- }
- }
- ii.awaitCompletion()
- infos = append(infos, ii.info)
- }
-
- return infos, errors
-}
-
-// findPath returns an arbitrary path from 'from' to 'to' in the import
-// graph, or nil if there was none.
-func (imp *importer) findPath(from, to string) []string {
- imp.graphMu.Lock()
- defer imp.graphMu.Unlock()
-
- seen := make(map[string]bool)
- var search func(stack []string, importPath string) []string
- search = func(stack []string, importPath string) []string {
- if !seen[importPath] {
- seen[importPath] = true
- stack = append(stack, importPath)
- if importPath == to {
- return stack
- }
- for x := range imp.graph[importPath] {
- if p := search(stack, x); p != nil {
- return p
- }
- }
- }
- return nil
- }
- return search(make([]string, 0, 20), from)
-}
-
-// startLoad initiates the loading, parsing and type-checking of the
-// specified package and its dependencies, if it has not already begun.
-//
-// It returns an importInfo, not necessarily in a completed state. The
-// caller must call awaitCompletion() before accessing its info field.
-//
-// startLoad is concurrency-safe and idempotent.
-//
-func (imp *importer) startLoad(bp *build.Package) *importInfo {
- path := bp.ImportPath
- imp.importedMu.Lock()
- ii, ok := imp.imported[path]
- if !ok {
- ii = &importInfo{path: path, complete: make(chan struct{})}
- imp.imported[path] = ii
- go func() {
- info := imp.load(bp)
- ii.Complete(info)
- }()
- }
- imp.importedMu.Unlock()
-
- return ii
-}
-
-// load implements package loading by parsing Go source files
-// located by go/build.
-func (imp *importer) load(bp *build.Package) *PackageInfo {
- info := imp.newPackageInfo(bp.ImportPath, bp.Dir)
- info.Importable = true
- files, errs := imp.conf.parsePackageFiles(bp, 'g')
- for _, err := range errs {
- info.appendError(err)
- }
-
- imp.addFiles(info, files, true)
-
- imp.progMu.Lock()
- imp.prog.importMap[bp.ImportPath] = info.Pkg
- imp.progMu.Unlock()
-
- return info
-}
-
-// addFiles adds and type-checks the specified files to info, loading
-// their dependencies if needed. The order of files determines the
-// package initialization order. It may be called multiple times on the
-// same package. Errors are appended to the info.Errors field.
-//
-// cycleCheck determines whether the imports within files create
-// dependency edges that should be checked for potential cycles.
-//
-func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
- // Ensure the dependencies are loaded, in parallel.
- var fromPath string
- if cycleCheck {
- fromPath = info.Pkg.Path()
- }
- // TODO(adonovan): opt: make the caller do scanImports.
- // Callers with a build.Package can skip it.
- imp.importAll(fromPath, info.dir, scanImports(files), 0)
-
- if trace {
- fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
- time.Since(imp.start), info.Pkg.Path(), len(files))
- }
-
- // Ignore the returned (first) error since we
- // already collect them all in the PackageInfo.
- info.checker.Files(files)
- info.Files = append(info.Files, files...)
-
- if imp.conf.AfterTypeCheck != nil {
- imp.conf.AfterTypeCheck(info, files)
- }
-
- if trace {
- fmt.Fprintf(os.Stderr, "%s: stop %q\n",
- time.Since(imp.start), info.Pkg.Path())
- }
-}
-
-func (imp *importer) newPackageInfo(path, dir string) *PackageInfo {
- pkg := types.NewPackage(path, "")
- info := &PackageInfo{
- Pkg: pkg,
- Info: types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- },
- errorFunc: imp.conf.TypeChecker.Error,
- dir: dir,
- }
-
- // Copy the types.Config so we can vary it across PackageInfos.
- tc := imp.conf.TypeChecker
- tc.IgnoreFuncBodies = false
- if f := imp.conf.TypeCheckFuncBodies; f != nil {
- tc.IgnoreFuncBodies = !f(path)
- }
- tc.Importer = closure{imp, info}
- tc.Error = info.appendError // appendError wraps the user's Error function
-
- info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
- imp.progMu.Lock()
- imp.prog.AllPackages[pkg] = info
- imp.progMu.Unlock()
- return info
-}
-
-type closure struct {
- imp *importer
- info *PackageInfo
-}
-
-func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) }
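
`findPackage` and `startLoad` in the removed loader both rely on a non-blocking, duplicate-suppressing cache: the first goroutine to request a key becomes responsible for the work and closes a `ready` channel when done, while later requesters block on that channel instead of repeating the work. A generic sketch of the pattern, with illustrative types and a made-up `compute` function:

```go
// Sketch only: duplicate-suppressing cache with a broadcast "ready" channel,
// as used by the removed loader's findpkg and imported maps.
package main

import (
	"fmt"
	"sync"
)

type entry struct {
	ready chan struct{} // closed once value is set
	value string
}

type cache struct {
	mu sync.Mutex
	m  map[string]*entry
}

func (c *cache) get(key string, compute func(string) string) string {
	c.mu.Lock()
	e, ok := c.m[key]
	if !ok {
		// Cache miss: this goroutine does the work and broadcasts readiness.
		e = &entry{ready: make(chan struct{})}
		c.m[key] = e
		c.mu.Unlock()
		e.value = compute(key)
		close(e.ready)
	} else {
		c.mu.Unlock()
		<-e.ready // wait for the first requester to finish
	}
	return e.value
}

func main() {
	c := &cache{m: make(map[string]*entry)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c.get("golang.org/x/tools", func(k string) string { return "resolved:" + k }))
		}()
	}
	wg.Wait()
}
```
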
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/util.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/util.go
deleted file mode 100644
index 7f38dd7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/go/loader/util.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-import (
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io"
- "os"
- "strconv"
- "sync"
-
- "golang.org/x/tools/go/buildutil"
-)
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 10)
-
-// parseFiles parses the Go source files within directory dir and
-// returns the ASTs of the ones that could be at least partially parsed,
-// along with a list of I/O and parse errors encountered.
-//
-// I/O is done via ctxt, which may specify a virtual file system.
-// displayPath is used to transform the filenames attached to the ASTs.
-//
-func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
- if displayPath == nil {
- displayPath = func(path string) string { return path }
- }
- var wg sync.WaitGroup
- n := len(files)
- parsed := make([]*ast.File, n)
- errors := make([]error, n)
- for i, file := range files {
- if !buildutil.IsAbsPath(ctxt, file) {
- file = buildutil.JoinPath(ctxt, dir, file)
- }
- wg.Add(1)
- go func(i int, file string) {
- ioLimit <- true // wait
- defer func() {
- wg.Done()
- <-ioLimit // signal
- }()
- var rd io.ReadCloser
- var err error
- if ctxt.OpenFile != nil {
- rd, err = ctxt.OpenFile(file)
- } else {
- rd, err = os.Open(file)
- }
- if err != nil {
- errors[i] = err // open failed
- return
- }
-
- // ParseFile may return both an AST and an error.
- parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
- rd.Close()
- }(i, file)
- }
- wg.Wait()
-
- // Eliminate nils, preserving order.
- var o int
- for _, f := range parsed {
- if f != nil {
- parsed[o] = f
- o++
- }
- }
- parsed = parsed[:o]
-
- o = 0
- for _, err := range errors {
- if err != nil {
- errors[o] = err
- o++
- }
- }
- errors = errors[:o]
-
- return parsed, errors
-}
-
-// scanImports returns the set of all import paths from all
-// import specs in the specified files.
-func scanImports(files []*ast.File) map[string]bool {
- imports := make(map[string]bool)
- for _, f := range files {
- for _, decl := range f.Decls {
- if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
- for _, spec := range decl.Specs {
- spec := spec.(*ast.ImportSpec)
-
- // NB: do not assume the program is well-formed!
- path, err := strconv.Unquote(spec.Path.Value)
- if err != nil {
- continue // quietly ignore the error
- }
- if path == "C" {
- continue // skip pseudopackage
- }
- imports[path] = true
- }
- }
- }
- }
- return imports
-}
-
-// ---------- Internal helpers ----------
-
-// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
-func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
- p := int(pos)
- base := f.Base()
- return base <= p && p < base+f.Size()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go b/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go
deleted file mode 100644
index 10ad727..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package awstesting
-
-import (
- "encoding/json"
- "encoding/xml"
- "fmt"
- "net/url"
- "reflect"
- "regexp"
- "sort"
- "testing"
-)
-
-// Match is a testing helper to test for testing error by comparing expected
-// with a regular expression.
-func Match(t *testing.T, regex, expected string) {
- if !regexp.MustCompile(regex).Match([]byte(expected)) {
- t.Errorf("%q\n\tdoes not match /%s/", expected, regex)
- }
-}
-
-// AssertURL verifies the expected URL is matches the actual.
-func AssertURL(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool {
- expectURL, err := url.Parse(expect)
- if err != nil {
- t.Errorf(errMsg("unable to parse expected URL", err, msgAndArgs))
- return false
- }
- actualURL, err := url.Parse(actual)
- if err != nil {
- t.Errorf(errMsg("unable to parse actual URL", err, msgAndArgs))
- return false
- }
-
- equal(t, expectURL.Host, actualURL.Host, msgAndArgs...)
- equal(t, expectURL.Scheme, actualURL.Scheme, msgAndArgs...)
- equal(t, expectURL.Path, actualURL.Path, msgAndArgs...)
-
- return AssertQuery(t, expectURL.Query().Encode(), actualURL.Query().Encode(), msgAndArgs...)
-}
-
-// AssertQuery verifies the expect HTTP query string matches the actual.
-func AssertQuery(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool {
- expectQ, err := url.ParseQuery(expect)
- if err != nil {
- t.Errorf(errMsg("unable to parse expected Query", err, msgAndArgs))
- return false
- }
-	actualQ, err := url.ParseQuery(actual)
- if err != nil {
- t.Errorf(errMsg("unable to parse actual Query", err, msgAndArgs))
- return false
- }
-
- // Make sure the keys are the same
- if !equal(t, queryValueKeys(expectQ), queryValueKeys(actualQ), msgAndArgs...) {
- return false
- }
-
- for k, expectQVals := range expectQ {
- sort.Strings(expectQVals)
- actualQVals := actualQ[k]
- sort.Strings(actualQVals)
- equal(t, expectQVals, actualQVals, msgAndArgs...)
- }
-
- return true
-}
-
-// AssertJSON verifies that the expect json string matches the actual.
-func AssertJSON(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool {
- expectVal := map[string]interface{}{}
- if err := json.Unmarshal([]byte(expect), &expectVal); err != nil {
- t.Errorf(errMsg("unable to parse expected JSON", err, msgAndArgs...))
- return false
- }
-
- actualVal := map[string]interface{}{}
- if err := json.Unmarshal([]byte(actual), &actualVal); err != nil {
- t.Errorf(errMsg("unable to parse actual JSON", err, msgAndArgs...))
- return false
- }
-
- return equal(t, expectVal, actualVal, msgAndArgs...)
-}
-
-// AssertXML verifies that the expect xml string matches the actual.
-func AssertXML(t *testing.T, expect, actual string, container interface{}, msgAndArgs ...interface{}) bool {
- expectVal := container
- if err := xml.Unmarshal([]byte(expect), &expectVal); err != nil {
- t.Errorf(errMsg("unable to parse expected XML", err, msgAndArgs...))
- }
-
- actualVal := container
- if err := xml.Unmarshal([]byte(actual), &actualVal); err != nil {
- t.Errorf(errMsg("unable to parse actual XML", err, msgAndArgs...))
- }
- return equal(t, expectVal, actualVal, msgAndArgs...)
-}
-
-// objectsAreEqual determines if two objects are considered equal.
-//
-// This function does no assertion of any kind.
-//
-// Based on github.com/stretchr/testify/assert.ObjectsAreEqual
-// Copied locally to prevent non-test build dependencies on testify
-func objectsAreEqual(expected, actual interface{}) bool {
- if expected == nil || actual == nil {
- return expected == actual
- }
-
- return reflect.DeepEqual(expected, actual)
-}
-
-// Equal asserts that two objects are equal.
-//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-//
-// Based on github.com/stretchr/testify/assert.Equal
-// Copied locally to prevent non-test build dependencies on testify
-func equal(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if !objectsAreEqual(expected, actual) {
- t.Errorf("Not Equal:\n\t%#v (expected)\n\t%#v (actual), %s",
- expected, actual, messageFromMsgAndArgs(msgAndArgs))
- return false
- }
-
- return true
-}
-
-func errMsg(baseMsg string, err error, msgAndArgs ...interface{}) string {
- message := messageFromMsgAndArgs(msgAndArgs)
- if message != "" {
- message += ", "
- }
- return fmt.Sprintf("%s%s, %v", message, baseMsg, err)
-}
-
-// Based on github.com/stretchr/testify/assert.messageFromMsgAndArgs
-// Copied locally to prevent non-test build dependencies on testify
-func messageFromMsgAndArgs(msgAndArgs []interface{}) string {
- if len(msgAndArgs) == 0 || msgAndArgs == nil {
- return ""
- }
- if len(msgAndArgs) == 1 {
- return msgAndArgs[0].(string)
- }
- if len(msgAndArgs) > 1 {
- return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
- }
- return ""
-}
-
-func queryValueKeys(v url.Values) []string {
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- return keys
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go
deleted file mode 100644
index 45903a5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package awstesting_test
-
-import (
- "encoding/xml"
- "testing"
-
- "github.com/aws/aws-sdk-go/awstesting"
-)
-
-func TestAssertJSON(t *testing.T) {
- cases := []struct {
- e, a string
- asserts bool
- }{
- {
- e: `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`,
- a: `{"RecursiveStruct":{"RecursiveMap":{"bar":{"NoRecurse":"bar"},"foo":{"NoRecurse":"foo"}}}}`,
- asserts: true,
- },
- }
-
- for i, c := range cases {
- mockT := &testing.T{}
- if awstesting.AssertJSON(mockT, c.e, c.a) != c.asserts {
- t.Error("Assert JSON result was not expected.", i)
- }
- }
-}
-
-func TestAssertXML(t *testing.T) {
- cases := []struct {
- e, a string
- asserts bool
- container struct {
- XMLName xml.Name `xml:"OperationRequest"`
- NS string `xml:"xmlns,attr"`
- RecursiveStruct struct {
- RecursiveMap struct {
- Entries []struct {
- XMLName xml.Name `xml:"entries"`
- Key string `xml:"key"`
- Value struct {
- XMLName xml.Name `xml:"value"`
- NoRecurse string
- }
- }
- }
- }
- }
- }{
- {
- e: `foofoobarbar`,
- a: `barbarfoofoo`,
- asserts: true,
- },
- }
-
- for i, c := range cases {
- // mockT := &testing.T{}
- if awstesting.AssertXML(t, c.e, c.a, c.container) != c.asserts {
- t.Error("Assert XML result was not expected.", i)
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/client.go
deleted file mode 100644
index ca64a44..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/client.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package awstesting
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/defaults"
-)
-
-// NewClient creates and initializes a generic service client for testing.
-func NewClient(cfgs ...*aws.Config) *client.Client {
- info := metadata.ClientInfo{
- Endpoint: "http://endpoint",
- SigningName: "",
- }
- def := defaults.Get()
- def.Config.MergeIn(cfgs...)
-
- return client.New(*def.Config, info, def.Handlers)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go
deleted file mode 100644
index 93d5ff6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// +build integration
-
-// Package s3_test runs integration tests for S3
-package s3_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-var bucketName *string
-var svc *s3.S3
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown() // only called if we panic
- result := m.Run()
- teardown()
- os.Exit(result)
-}
-
-// Create a bucket for testing
-func setup() {
- svc = s3.New(integration.Session)
- bucketName = aws.String(
- fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID()))
-
- for i := 0; i < 10; i++ {
- _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName})
- if err == nil {
- break
- }
- }
-
- for {
- _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName})
- if err == nil {
- break
- }
- time.Sleep(1 * time.Second)
- }
-}
-
-// Delete the bucket
-func teardown() {
- resp, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName})
- for _, o := range resp.Contents {
- svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key})
- }
- svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName})
-}
-
-func TestWriteToObject(t *testing.T) {
- _, err := svc.PutObject(&s3.PutObjectInput{
- Bucket: bucketName,
- Key: aws.String("key name"),
- Body: bytes.NewReader([]byte("hello world")),
- })
- assert.NoError(t, err)
-
- resp, err := svc.GetObject(&s3.GetObjectInput{
- Bucket: bucketName,
- Key: aws.String("key name"),
- })
- assert.NoError(t, err)
-
- b, _ := ioutil.ReadAll(resp.Body)
- assert.Equal(t, []byte("hello world"), b)
-}
-
-func TestPresignedGetPut(t *testing.T) {
- putreq, _ := svc.PutObjectRequest(&s3.PutObjectInput{
- Bucket: bucketName,
- Key: aws.String("presigned-key"),
- })
- var err error
-
- // Presign a PUT request
- var puturl string
- puturl, err = putreq.Presign(300 * time.Second)
- assert.NoError(t, err)
-
- // PUT to the presigned URL with a body
- var puthttpreq *http.Request
- buf := bytes.NewReader([]byte("hello world"))
- puthttpreq, err = http.NewRequest("PUT", puturl, buf)
- assert.NoError(t, err)
-
- var putresp *http.Response
- putresp, err = http.DefaultClient.Do(puthttpreq)
- assert.NoError(t, err)
- assert.Equal(t, 200, putresp.StatusCode)
-
- // Presign a GET on the same URL
- getreq, _ := svc.GetObjectRequest(&s3.GetObjectInput{
- Bucket: bucketName,
- Key: aws.String("presigned-key"),
- })
-
- var geturl string
- geturl, err = getreq.Presign(300 * time.Second)
- assert.NoError(t, err)
-
- // Get the body
- var getresp *http.Response
- getresp, err = http.Get(geturl)
- assert.NoError(t, err)
-
- var b []byte
- defer getresp.Body.Close()
- b, err = ioutil.ReadAll(getresp.Body)
- assert.Equal(t, "hello world", string(b))
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/client.go
deleted file mode 100644
index 83632d1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/client.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build integration
-
-//Package s3crypto provides gucumber integration tests support.
-package s3crypto
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3crypto"
-
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@s3crypto", func() {
- sess := session.New((&aws.Config{
- Region: aws.String("us-west-2"),
- }).WithLogLevel(aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors))
- encryptionClient := s3crypto.NewEncryptionClient(sess, nil, func(c *s3crypto.EncryptionClient) {
- })
- gucumber.World["encryptionClient"] = encryptionClient
-
- decryptionClient := s3crypto.NewDecryptionClient(sess)
- gucumber.World["decryptionClient"] = decryptionClient
-
- gucumber.World["client"] = s3.New(sess)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/s3_crypto.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/s3_crypto.feature
deleted file mode 100644
index a7d433a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/s3_crypto.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@s3crypto @client
-Feature: S3 Integration Crypto Tests
-
- Scenario: Get all plaintext fixtures for symmetric masterkey aes cbc
- When I get all fixtures for "aes_gcm" from "aws-s3-shared-tests"
- Then I decrypt each fixture against "Java" "version_2"
- And I compare the decrypted ciphertext to the plaintext
-
- Scenario: Uploading Go's SDK fixtures
- When I get all fixtures for "aes_gcm" from "aws-s3-shared-tests"
- Then I encrypt each fixture with "kms" "AWS_SDK_TEST_ALIAS" "us-west-2" and "aes_gcm"
- And upload "Go" data with folder "version_2"
-
- Scenario: Get all plaintext fixtures for symmetric masterkey aes gcm
- When I get all fixtures for "aes_gcm" from "aws-s3-shared-tests"
- Then I decrypt each fixture against "Go" "version_2"
- And I compare the decrypted ciphertext to the plaintext
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/stepdef.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/stepdef.go
deleted file mode 100644
index b558d8a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3crypto/stepdef.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// +build integration
-
-// Package s3crypto contains shared step definitions that are used across integration tests
-package s3crypto
-
-import (
- "bytes"
- "encoding/base64"
- "errors"
- "io/ioutil"
- "strings"
-
- "github.com/gucumber/gucumber"
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/kms"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3crypto"
-)
-
-func init() {
- gucumber.When(`^I get all fixtures for "(.+?)" from "(.+?)"$`,
- func(cekAlg, bucket string) {
- prefix := "plaintext_test_case_"
- baseFolder := "crypto_tests/" + cekAlg
- s3Client := gucumber.World["client"].(*s3.S3)
-
- out, err := s3Client.ListObjects(&s3.ListObjectsInput{
- Bucket: aws.String(bucket),
- Prefix: aws.String(baseFolder + "/" + prefix),
- })
- assert.NoError(gucumber.T, err)
-
- plaintexts := make(map[string][]byte)
- for _, obj := range out.Contents {
- plaintextKey := obj.Key
- ptObj, err := s3Client.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(bucket),
- Key: plaintextKey,
- })
- assert.NoError(gucumber.T, err)
- caseKey := strings.TrimPrefix(*plaintextKey, baseFolder+"/"+prefix)
- plaintext, err := ioutil.ReadAll(ptObj.Body)
- assert.NoError(gucumber.T, err)
-
- plaintexts[caseKey] = plaintext
- }
- gucumber.World["baseFolder"] = baseFolder
- gucumber.World["bucket"] = bucket
- gucumber.World["plaintexts"] = plaintexts
- })
-
- gucumber.Then(`^I decrypt each fixture against "(.+?)" "(.+?)"$`, func(lang, version string) {
- plaintexts := gucumber.World["plaintexts"].(map[string][]byte)
- baseFolder := gucumber.World["baseFolder"].(string)
- bucket := gucumber.World["bucket"].(string)
- prefix := "ciphertext_test_case_"
- s3Client := gucumber.World["client"].(*s3.S3)
- s3CryptoClient := gucumber.World["decryptionClient"].(*s3crypto.DecryptionClient)
- language := "language_" + lang
-
- ciphertexts := make(map[string][]byte)
- for caseKey := range plaintexts {
- cipherKey := baseFolder + "/" + version + "/" + language + "/" + prefix + caseKey
-
- // To get metadata for encryption key
- ctObj, err := s3Client.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(bucket),
- Key: &cipherKey,
- })
- if err != nil {
- continue
- }
-
- // We don't support wrap, so skip it
- if *ctObj.Metadata["X-Amz-Wrap-Alg"] != "kms" {
- continue
- }
- //masterkeyB64 := ctObj.Metadata["Masterkey"]
- //masterkey, err := base64.StdEncoding.DecodeString(*masterkeyB64)
- //assert.NoError(T, err)
-
- //s3CryptoClient.Config.MasterKey = masterkey
- ctObj, err = s3CryptoClient.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(bucket),
- Key: &cipherKey,
- },
- )
- assert.NoError(gucumber.T, err)
-
- ciphertext, err := ioutil.ReadAll(ctObj.Body)
- assert.NoError(gucumber.T, err)
- ciphertexts[caseKey] = ciphertext
- }
- gucumber.World["ciphertexts"] = ciphertexts
- })
-
- gucumber.And(`^I compare the decrypted ciphertext to the plaintext$`, func() {
- plaintexts := gucumber.World["plaintexts"].(map[string][]byte)
- ciphertexts := gucumber.World["ciphertexts"].(map[string][]byte)
- for caseKey, ciphertext := range ciphertexts {
- assert.Equal(gucumber.T, len(plaintexts[caseKey]), len(ciphertext))
- assert.True(gucumber.T, bytes.Equal(plaintexts[caseKey], ciphertext))
- }
- })
-
- gucumber.Then(`^I encrypt each fixture with "(.+?)" "(.+?)" "(.+?)" and "(.+?)"$`, func(kek, v1, v2, cek string) {
- var handler s3crypto.CipherDataGenerator
- var builder s3crypto.ContentCipherBuilder
- switch kek {
- case "kms":
- arn, err := getAliasInformation(v1, v2)
- assert.Nil(gucumber.T, err)
-
- b64Arn := base64.StdEncoding.EncodeToString([]byte(arn))
- assert.Nil(gucumber.T, err)
- gucumber.World["Masterkey"] = b64Arn
-
- handler = s3crypto.NewKMSKeyGenerator(kms.New(session.New(&aws.Config{
- Region: &v2,
- })), arn)
- assert.Nil(gucumber.T, err)
- default:
- gucumber.T.Skip()
- }
-
- switch cek {
- case "aes_gcm":
- builder = s3crypto.AESGCMContentCipherBuilder(handler)
- default:
- gucumber.T.Skip()
- }
-
- sess := session.New(&aws.Config{
- Region: aws.String("us-west-2"),
- })
- c := s3crypto.NewEncryptionClient(sess, builder, func(c *s3crypto.EncryptionClient) {
- })
- gucumber.World["encryptionClient"] = c
- gucumber.World["cek"] = cek
- })
-
- gucumber.And(`^upload "(.+?)" data with folder "(.+?)"$`, func(language, folder string) {
- c := gucumber.World["encryptionClient"].(*s3crypto.EncryptionClient)
- cek := gucumber.World["cek"].(string)
- bucket := gucumber.World["bucket"].(string)
- plaintexts := gucumber.World["plaintexts"].(map[string][]byte)
- key := gucumber.World["Masterkey"].(string)
- for caseKey, plaintext := range plaintexts {
- input := &s3.PutObjectInput{
- Bucket: &bucket,
- Key: aws.String("crypto_tests/" + cek + "/" + folder + "/language_" + language + "/ciphertext_test_case_" + caseKey),
- Body: bytes.NewReader(plaintext),
- Metadata: map[string]*string{
- "Masterkey": &key,
- },
- }
-
- _, err := c.PutObject(input)
- assert.Nil(gucumber.T, err)
- }
- })
-}
-
-func getAliasInformation(alias, region string) (string, error) {
- arn := ""
- svc := kms.New(session.New(&aws.Config{
-		Region: &region,
- }))
-
- truncated := true
- var marker *string
- for truncated {
- out, err := svc.ListAliases(&kms.ListAliasesInput{
- Marker: marker,
- })
- if err != nil {
- return arn, err
- }
- for _, aliasEntry := range out.Aliases {
- if *aliasEntry.AliasName == "alias/"+alias {
- return *aliasEntry.AliasArn, nil
- }
- }
- truncated = *out.Truncated
- marker = out.NextMarker
- }
-
- return "", errors.New("The alias " + alias + " does not exist in your account. Please add the proper alias to a key")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go
deleted file mode 100644
index 3a0dd60..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// +build integration
-
-// Package s3manager provides integration tests for the s3manager package.
-package s3manager
-
-import (
- "bytes"
- "crypto/md5"
- "fmt"
- "io"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/integration"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3manager"
-)
-
-var integBuf12MB = make([]byte, 1024*1024*12)
-var integMD512MB = fmt.Sprintf("%x", md5.Sum(integBuf12MB))
-var bucketName *string
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown() // only called if we panic
- result := m.Run()
- teardown()
- os.Exit(result)
-}
-
-func setup() {
- // Create a bucket for testing
- svc := s3.New(integration.Session)
- bucketName = aws.String(
- fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID()))
-
- for i := 0; i < 10; i++ {
- _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName})
- if err == nil {
- break
- }
- }
-
- for {
- _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName})
- if err == nil {
- break
- }
- time.Sleep(1 * time.Second)
- }
-}
-
-// Delete the bucket
-func teardown() {
- svc := s3.New(integration.Session)
-
- objs, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName})
- for _, o := range objs.Contents {
- svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key})
- }
-
- uploads, _ := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName})
- for _, u := range uploads.Uploads {
- svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
- Bucket: bucketName,
- Key: u.Key,
- UploadId: u.UploadId,
- })
- }
-
- svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName})
-}
-
-type dlwriter struct {
- buf []byte
-}
-
-func newDLWriter(size int) *dlwriter {
- return &dlwriter{buf: make([]byte, size)}
-}
-
-func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) {
- if pos > int64(len(d.buf)) {
- return 0, io.EOF
- }
-
- written := 0
- for i, b := range p {
- if i >= len(d.buf) {
- break
- }
- d.buf[pos+int64(i)] = b
- written++
- }
- return written, nil
-}
-
-func validate(t *testing.T, key string, md5value string) {
- mgr := s3manager.NewDownloader(integration.Session)
- params := &s3.GetObjectInput{Bucket: bucketName, Key: &key}
-
- w := newDLWriter(1024 * 1024 * 20)
- n, err := mgr.Download(w, params)
- assert.NoError(t, err)
- assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n])))
-}
-
-func TestUploadConcurrently(t *testing.T) {
- key := "12mb-1"
- mgr := s3manager.NewUploader(integration.Session)
- out, err := mgr.Upload(&s3manager.UploadInput{
- Bucket: bucketName,
- Key: &key,
- Body: bytes.NewReader(integBuf12MB),
- })
-
- assert.NoError(t, err)
- assert.NotEqual(t, "", out.UploadID)
- assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location)
-
- validate(t, key, integMD512MB)
-}
-
-func TestUploadFailCleanup(t *testing.T) {
- svc := s3.New(integration.Session)
-
- // Break checksum on 2nd part so it fails
- part := 0
- svc.Handlers.Build.PushBack(func(r *request.Request) {
- if r.Operation.Name == "UploadPart" {
- if part == 1 {
- r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
- }
- part++
- }
- })
-
- key := "12mb-leave"
- mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
- u.LeavePartsOnError = false
- })
- _, err := mgr.Upload(&s3manager.UploadInput{
- Bucket: bucketName,
- Key: &key,
- Body: bytes.NewReader(integBuf12MB),
- })
- assert.Error(t, err)
- assert.NotContains(t, err.Error(), "MissingRegion")
- uploadID := ""
- if merr, ok := err.(s3manager.MultiUploadFailure); ok {
- uploadID = merr.UploadID()
- }
- assert.NotEmpty(t, uploadID)
-
- _, err = svc.ListParts(&s3.ListPartsInput{
- Bucket: bucketName, Key: &key, UploadId: &uploadID})
- assert.Error(t, err)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go
deleted file mode 100644
index 9434ae9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go
+++ /dev/null
@@ -1 +0,0 @@
-package s3manager
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go
deleted file mode 100644
index 3ed7f97..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go
+++ /dev/null
@@ -1 +0,0 @@
-package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go
deleted file mode 100644
index 88bcf16..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build integration
-
-// Package integration performs initialization and validation for integration
-// tests.
-package integration
-
-import (
- "crypto/rand"
- "fmt"
- "io"
- "os"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
-)
-
-// Session is a shared session for all integration tests to use.
-var Session = session.Must(session.NewSession())
-
-func init() {
- logLevel := Session.Config.LogLevel
- if os.Getenv("DEBUG") != "" {
- logLevel = aws.LogLevel(aws.LogDebug)
- }
- if os.Getenv("DEBUG_SIGNING") != "" {
- logLevel = aws.LogLevel(aws.LogDebugWithSigning)
- }
- if os.Getenv("DEBUG_BODY") != "" {
- logLevel = aws.LogLevel(aws.LogDebugWithSigning | aws.LogDebugWithHTTPBody)
- }
- Session.Config.LogLevel = logLevel
-
- if aws.StringValue(Session.Config.Region) == "" {
- panic("AWS_REGION must be configured to run integration tests")
- }
-}
-
-// UniqueID returns a unique UUID-like identifier for use in generating
-// resources for integration tests.
-func UniqueID() string {
- uuid := make([]byte, 16)
- io.ReadFull(rand.Reader, uuid)
- return fmt.Sprintf("%x", uuid)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature
deleted file mode 100644
index dc28b55..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature
+++ /dev/null
@@ -1,14 +0,0 @@
-#language en
-@acm @client
-Feature: AWS Certificate Manager
-
- Scenario: Making a request
- When I call the "ListCertificates" API
- Then the request should be successful
-
- Scenario: Handling errors
- When I attempt to call the "GetCertificate" API with:
- | CertificateArn | arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message not be empty
-
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go
deleted file mode 100644
index fdb6438..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package acm provides gucumber integration tests support.
-package acm
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/acm"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@acm", func() {
- gucumber.World["client"] = acm.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature
deleted file mode 100644
index 4286b81..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@apigateway @client
-Feature: Amazon API Gateway
-
- Scenario: Making a request
- When I call the "GetAccountRequest" API
- Then the request should be successful
-
-  Scenario: Handling errors
- When I attempt to call the "GetRestApi" API with:
- | RestApiId | api123 |
- Then I expect the response error code to be "NotFoundException"
- And I expect the response error message to include:
- """
- Invalid REST API identifier specified
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go
deleted file mode 100644
index 10ee2de..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package apigateway provides gucumber integration tests support.
-package apigateway
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/apigateway"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@apigateway", func() {
- gucumber.World["client"] = apigateway.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature
deleted file mode 100644
index 02ae287..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature
+++ /dev/null
@@ -1,8 +0,0 @@
-#language en
-@applicationdiscoveryservice @client
-Feature: AWS Application Discovery Service
-
- Scenario: Making a request
- When I call the "DescribeAgents" API
- Then the request should be successful
-
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go
deleted file mode 100644
index 85a4dab..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build integration
-
-//Package applicationdiscoveryservice provides gucumber integration tests support.
-package applicationdiscoveryservice
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@applicationdiscoveryservice", func() {
- gucumber.World["client"] = applicationdiscoveryservice.New(
- smoke.Session, &aws.Config{Region: aws.String("us-west-2")},
- )
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature
deleted file mode 100644
index 7c2bdf6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@autoscaling @client
-Feature: Auto Scaling
-
- Scenario: Making a request
- When I call the "DescribeScalingProcessTypes" API
- Then the value at "Processes" should be a list
-
-  Scenario: Handling errors
- When I attempt to call the "CreateLaunchConfiguration" API with:
- | LaunchConfigurationName | |
- | ImageId | ami-12345678 |
- | InstanceType | m1.small |
- Then I expect the response error code to be "InvalidParameter"
- And I expect the response error message to include:
- """
- LaunchConfigurationName
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go
deleted file mode 100644
index 55c68d1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package autoscaling provides gucumber integration tests support.
-package autoscaling
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/autoscaling"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@autoscaling", func() {
- gucumber.World["client"] = autoscaling.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go
deleted file mode 100644
index 079fde7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudformation provides gucumber integration tests support.
-package cloudformation
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudformation"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudformation", func() {
- gucumber.World["client"] = cloudformation.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature
deleted file mode 100644
index 3eafaf6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@cloudformation @client
-Feature: AWS CloudFormation
-
- Scenario: Making a request
- When I call the "ListStacks" API
- Then the value at "StackSummaries" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "CreateStack" API with:
- | StackName | fakestack |
- | TemplateURL | http://s3.amazonaws.com/foo/bar |
- Then I expect the response error code to be "ValidationError"
- And I expect the response error message to include:
- """
- TemplateURL must reference a valid S3 object to which you have access.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go
deleted file mode 100644
index c958362..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudfront provides gucumber integration tests support.
-package cloudfront
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudfront"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudfront", func() {
- gucumber.World["client"] = cloudfront.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature
deleted file mode 100644
index bbb2a8d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@cloudfront @client
-Feature: Amazon CloudFront
-
- Scenario: Making a basic request
- When I call the "ListDistributions" API with:
- | MaxItems | 1 |
- Then the value at "DistributionList.Items" should be a list
-
- Scenario: Error handling
- When I attempt to call the "GetDistribution" API with:
- | Id | fake-id |
- Then I expect the response error code to be "NoSuchDistribution"
- And I expect the response error message to include:
- """
- The specified distribution does not exist.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go
deleted file mode 100644
index 23f24be..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudhsm provides gucumber integration tests support.
-package cloudhsm
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudhsm"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudhsm", func() {
- gucumber.World["client"] = cloudhsm.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature
deleted file mode 100644
index 545ca4e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@cloudhsm @client
-Feature: Amazon CloudHSM
-
- Scenario: Making a request
- When I call the "ListHapgs" API
- Then the value at "HapgList" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeHapg" API with:
- | HapgArn | bogus-arn |
- Then I expect the response error code to be "ValidationException"
- And I expect the response error message to include:
- """
- Value 'bogus-arn' at 'hapgArn' failed to satisfy constraint
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go
deleted file mode 100644
index c346b28..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudsearch provides gucumber integration tests support.
-package cloudsearch
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudsearch"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudsearch", func() {
- gucumber.World["client"] = cloudsearch.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature
deleted file mode 100644
index 160e916..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@cloudsearch @client
-Feature: Amazon CloudSearch
-
- Scenario: Making a request
- When I call the "DescribeDomains" API
- Then the response should contain a "DomainStatusList"
-
- Scenario: Handling errors
- When I attempt to call the "DescribeIndexFields" API with:
- | DomainName | fakedomain |
- Then I expect the response error code to be "ResourceNotFound"
- And I expect the response error message to include:
- """
- Domain not found: fakedomain
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go
deleted file mode 100644
index 97c7bfa..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudtrail provides gucumber integration tests support.
-package cloudtrail
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudtrail"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudtrail", func() {
- gucumber.World["client"] = cloudtrail.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature
deleted file mode 100644
index 817ab5c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature
+++ /dev/null
@@ -1,12 +0,0 @@
-# language: en
-@cloudtrail @client
-Feature: AWS CloudTrail
-
- Scenario: Making a request
- When I call the "DescribeTrails" API
- Then the request should be successful
-
- Scenario: Handling errors
- When I attempt to call the "DeleteTrail" API with:
- | Name | faketrail |
- Then I expect the response error code to be "TrailNotFoundException"
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go
deleted file mode 100644
index ebc339d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudwatch provides gucumber integration tests support.
-package cloudwatch
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudwatch"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudwatch", func() {
- gucumber.World["client"] = cloudwatch.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature
deleted file mode 100644
index 84307ef..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature
+++ /dev/null
@@ -1,19 +0,0 @@
-# language: en
-@cloudwatch @monitoring @client
-Feature: Amazon CloudWatch
-
- Scenario: Making a request
- When I call the "ListMetrics" API with:
- | Namespace | AWS/EC2 |
- Then the value at "Metrics" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "SetAlarmState" API with:
- | AlarmName | abc |
- | StateValue | mno |
- | StateReason | xyz |
- Then I expect the response error code to be "ValidationError"
- And I expect the response error message to include:
- """
- failed to satisfy constraint
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go
deleted file mode 100644
index 75fa2c5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cloudwatchlogs provides gucumber integration tests support.
-package cloudwatchlogs
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cloudwatchlogs", func() {
- gucumber.World["client"] = cloudwatchlogs.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature
deleted file mode 100644
index 5711c4e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@cloudwatchlogs @logs
-Feature: Amazon CloudWatch Logs
-
- Scenario: Making a request
- When I call the "DescribeLogGroups" API
- Then the value at "logGroups" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetLogEvents" API with:
- | logGroupName | fakegroup |
- | logStreamName | fakestream |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- The specified log group does not exist.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go
deleted file mode 100644
index 2f9da34..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package codecommit provides gucumber integration tests support.
-package codecommit
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/codecommit"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@codecommit", func() {
- gucumber.World["client"] = codecommit.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature
deleted file mode 100644
index c5c0190..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@codecommit @client
-Feature: Amazon CodeCommit
-
- Scenario: Making a request
- When I call the "ListRepositories" API
- Then the value at "repositories" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "ListBranches" API with:
- | repositoryName | fake-repo |
- Then I expect the response error code to be "RepositoryDoesNotExistException"
- And I expect the response error message to include:
- """
- fake-repo does not exist
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go
deleted file mode 100644
index 29587b9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package codedeploy provides gucumber integration tests support.
-package codedeploy
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/codedeploy"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@codedeploy", func() {
- gucumber.World["client"] = codedeploy.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature
deleted file mode 100644
index 45dfd2f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@codedeploy @client
-Feature: Amazon CodeDeploy
-
- Scenario: Making a request
- When I call the "ListApplications" API
- Then the value at "applications" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetDeployment" API with:
- | deploymentId | d-USUAELQEX |
- Then I expect the response error code to be "DeploymentDoesNotExistException"
- And I expect the response error message to include:
- """
- The deployment d-USUAELQEX could not be found
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go
deleted file mode 100644
index edc34f6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package codepipeline provides gucumber integration tests support.
-package codepipeline
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/codepipeline"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@codepipeline", func() {
- gucumber.World["client"] = codepipeline.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature
deleted file mode 100644
index cb962cc..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@codepipeline @client
-Feature: Amazon CodePipeline
-
- Scenario: Making a request
- When I call the "ListPipelines" API
- Then the value at "pipelines" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetPipeline" API with:
- | name | fake-pipeline |
- Then I expect the response error code to be "PipelineNotFoundException"
- And I expect the response error message to include:
- """
- does not have a pipeline with name 'fake-pipeline'
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go
deleted file mode 100644
index 476169f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cognitoidentity provides gucumber integration tests support.
-package cognitoidentity
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cognitoidentity"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cognitoidentity", func() {
- gucumber.World["client"] = cognitoidentity.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature
deleted file mode 100644
index 12abcc8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature
+++ /dev/null
@@ -1,19 +0,0 @@
-# language: en
-@cognitoidentity @client
-Feature: Amazon Cognito Identity
-
- Scenario: Making a request
- When I call the "ListIdentityPools" API with JSON:
- """
- {"MaxResults": 10}
- """
- Then the value at "IdentityPools" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeIdentityPool" API with:
- | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go
deleted file mode 100644
index 585e47c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package cognitosync provides gucumber integration tests support.
-package cognitosync
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/cognitosync"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@cognitosync", func() {
- gucumber.World["client"] = cognitosync.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature
deleted file mode 100644
index 3cdf84e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@cognitosync @client
-Feature: Amazon Cognito Sync
-
- Scenario: Making a request
- When I call the "ListIdentityPoolUsage" API
- Then the value at "IdentityPoolUsages" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeIdentityPoolUsage" API with:
- | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go
deleted file mode 100644
index fed6229..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package configservice provides gucumber integration tests support.
-package configservice
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/configservice"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@configservice", func() {
- gucumber.World["client"] = configservice.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature
deleted file mode 100644
index ccc3af6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@configservice @config @client
-Feature: AWS Config
-
- Scenario: Making a request
- When I call the "DescribeConfigurationRecorders" API
- Then the value at "ConfigurationRecorders" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetResourceConfigHistory" API with:
- | resourceType | fake-type |
- | resourceId | fake-id |
- Then I expect the response error code to be "ValidationException"
- And I expect the response error message to include:
- """
- failed to satisfy constraint
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go
deleted file mode 100644
index 10bb6f1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package datapipeline provides gucumber integration tests support.
-package datapipeline
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/datapipeline"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@datapipeline", func() {
- gucumber.World["client"] = datapipeline.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature
deleted file mode 100644
index db31518..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@datapipeline @client
-Feature: AWS Data Pipeline
-
- Scenario: Making a request
- When I call the "ListPipelines" API
- Then the response should contain a "pipelineIdList"
-
- Scenario: Handling errors
- When I attempt to call the "GetPipelineDefinition" API with:
- | pipelineId | fake-id |
- Then I expect the response error code to be "PipelineNotFoundException"
- And I expect the response error message to include:
- """
- does not exist
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go
deleted file mode 100644
index f1bcbf7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build integration
-
-//Package devicefarm provides gucumber integration tests support.
-package devicefarm
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/devicefarm"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@devicefarm", func() {
- // FIXME remove custom region
- gucumber.World["client"] = devicefarm.New(smoke.Session,
- aws.NewConfig().WithRegion("us-west-2"))
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature
deleted file mode 100644
index 1d200a9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@devicefarm @client
-Feature: AWS Device Farm
-
- Scenario: Making a request
- When I call the "ListDevices" API
- Then the value at "devices" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetDevice" API with:
- | arn | arn:aws:devicefarm:us-west-2::device:000000000000000000000000fake-arn |
- Then I expect the response error code to be "NotFoundException"
- And I expect the response error message to include:
- """
- No device was found for arn
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go
deleted file mode 100644
index c86e5d8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package directconnect provides gucumber integration tests support.
-package directconnect
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/directconnect"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@directconnect", func() {
- gucumber.World["client"] = directconnect.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature
deleted file mode 100644
index 3efd9c7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@directconnect @client
-Feature: AWS Direct Connect
-
- Scenario: Making a request
- When I call the "DescribeConnections" API
- Then the value at "connections" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeConnections" API with:
- | connectionId | fake-connection |
- Then I expect the response error code to be "DirectConnectClientException"
- And I expect the response error message to include:
- """
- Connection ID fake-connection has an invalid format
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go
deleted file mode 100644
index ae2fbba..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package directoryservice provides gucumber integration tests support.
-package directoryservice
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/directoryservice"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@directoryservice", func() {
- gucumber.World["client"] = directoryservice.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature
deleted file mode 100644
index 315839b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@directoryservice @ds @client
-Feature: AWS Directory Service
-
- I want to use AWS Directory Service
-
- Scenario: Making a request
- When I call the "DescribeDirectories" API
- Then the value at "DirectoryDescriptions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "CreateDirectory" API with:
- | Name | |
- | Password | |
- | Size | |
- Then I expect the response error code to be "ValidationException"
-
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go
deleted file mode 100644
index 5e3d3fb..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package dynamodb provides gucumber integration tests support.
-package dynamodb
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@dynamodb", func() {
- gucumber.World["client"] = dynamodb.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature
deleted file mode 100644
index 1df6b3c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature
+++ /dev/null
@@ -1,19 +0,0 @@
-# language: en
-@dynamodb @client
-Feature: Amazon DynamoDB
-
- Scenario: Making a request
- When I call the "ListTables" API with JSON:
- """
- {"Limit": 1}
- """
- Then the value at "TableNames" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeTable" API with:
- | TableName | fake-table |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- Requested resource not found: Table: fake-table not found
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go
deleted file mode 100644
index 64cedf2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package dynamodbstreams provides gucumber integration tests support.
-package dynamodbstreams
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@dynamodbstreams", func() {
- gucumber.World["client"] = dynamodbstreams.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature
deleted file mode 100644
index 6e35e29..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@dynamodbstreams @client
-Feature: Amazon DynamoDB Streams
-
- Scenario: Making a request
- When I call the "ListStreams" API
- Then the value at "Streams" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeStream" API with:
- | StreamArn | fake-stream |
- Then I expect the response error code to be "InvalidParameter"
- And I expect the response error message to include:
- """
- StreamArn
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go
deleted file mode 100644
index 6820153..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package ec2 provides gucumber integration tests support.
-package ec2
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@ec2", func() {
- gucumber.World["client"] = ec2.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature
deleted file mode 100644
index e238c2c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@ec2 @client
-Feature: Amazon Elastic Compute Cloud
-
- Scenario: Making a request
- When I call the "DescribeRegions" API
- Then the value at "Regions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeInstances" API with JSON:
- """
- {"InstanceIds": ["i-12345678"]}
- """
- Then I expect the response error code to be "InvalidInstanceID.NotFound"
- And I expect the response error message to include:
- """
- The instance ID 'i-12345678' does not exist
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go
deleted file mode 100644
index 0db8224..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build integration
-
-//Package ecs provides gucumber integration tests support.
-package ecs
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/ecs"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@ecs", func() {
- // FIXME remove custom region
- gucumber.World["client"] = ecs.New(smoke.Session,
- aws.NewConfig().WithRegion("us-west-2"))
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature
deleted file mode 100644
index 6942137..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature
+++ /dev/null
@@ -1,14 +0,0 @@
-# language: en
-@ecs @client
-Feature: Amazon ECS
-
- I want to use Amazon ECS
-
- Scenario: Making a request
- When I call the "ListClusters" API
- Then the value at "clusterArns" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "StopTask" API with:
- | task | xxxxxxxxxxx-xxxxxxxxxxxx-xxxxxxxxxxx |
- Then the error code should be "ClusterNotFoundException"
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go
deleted file mode 100644
index fba6a32..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build integration
-
-//Package efs provides gucumber integration tests support.
-package efs
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/efs"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@efs", func() {
- // FIXME remove custom region
- gucumber.World["client"] = efs.New(smoke.Session,
- aws.NewConfig().WithRegion("us-west-2"))
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature
deleted file mode 100644
index 113dd35..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature
+++ /dev/null
@@ -1,14 +0,0 @@
-# language: en
-@efs @elasticfilesystem @client
-Feature: Amazon Elastic File System
-
- I want to use Amazon Elastic File System
-
- Scenario: Making a request
- When I call the "DescribeFileSystems" API
- Then the value at "FileSystems" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DeleteFileSystem" API with:
- | FileSystemId | fake-id |
- Then the error code should be "BadRequest"
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go
deleted file mode 100644
index 386237f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package elasticache provides gucumber integration tests support.
-package elasticache
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/elasticache"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@elasticache", func() {
- gucumber.World["client"] = elasticache.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature
deleted file mode 100644
index 48828ca..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@elasticache @client
-Feature: ElastiCache
-
- Scenario: Making a request
- When I call the "DescribeEvents" API
- Then the value at "Events" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeCacheClusters" API with:
- | CacheClusterId | fake_cluster |
- Then I expect the response error code to be "InvalidParameterValue"
- And I expect the response error message to include:
- """
- The parameter CacheClusterIdentifier is not a valid identifier.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go
deleted file mode 100644
index 61cb2e1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package elasticbeanstalk provides gucumber integration tests support.
-package elasticbeanstalk
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@elasticbeanstalk", func() {
- gucumber.World["client"] = elasticbeanstalk.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature
deleted file mode 100644
index 35b1ad8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@elasticbeanstalk @client
-Feature: AWS Elastic Beanstalk
-
- Scenario: Making a request
- When I call the "ListAvailableSolutionStacks" API
- Then the value at "SolutionStacks" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeEnvironmentResources" API with:
- | EnvironmentId | fake_environment |
- Then I expect the response error code to be "InvalidParameterValue"
- And I expect the response error message to include:
- """
- No Environment found for EnvironmentId = 'fake_environment'.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go
deleted file mode 100644
index 6682a77..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package elasticloadbalancing provides gucumber integration tests support.
-package elasticloadbalancing
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/elb"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@elasticloadbalancing", func() {
- gucumber.World["client"] = elb.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature
deleted file mode 100644
index a8c7209..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@elasticloadbalancing @client
-Feature: Elastic Load Balancing
-
- Scenario: Making a request
- When I call the "DescribeLoadBalancers" API
- Then the value at "LoadBalancerDescriptions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeLoadBalancers" API with JSON:
- """
- {"LoadBalancerNames": ["fake_load_balancer"]}
- """
- Then I expect the response error code to be "ValidationError"
- And I expect the response error message to include:
- """
- LoadBalancer name cannot contain characters that are not letters, or digits or the dash.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go
deleted file mode 100644
index 7e29b47..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package elastictranscoder provides gucumber integration tests support.
-package elastictranscoder
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/elastictranscoder"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@elastictranscoder", func() {
- gucumber.World["client"] = elastictranscoder.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature
deleted file mode 100644
index 77658e6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@elastictranscoder @client
-Feature: Amazon Elastic Transcoder
-
- Scenario: Making a request
- When I call the "ListPresets" API
- Then the value at "Presets" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "ReadJob" API with:
- | Id | fake_job |
- Then I expect the response error code to be "ValidationException"
- And I expect the response error message to include:
- """
- Value 'fake_job' at 'id' failed to satisfy constraint
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go
deleted file mode 100644
index 41295c7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package emr provides gucumber integration tests support.
-package emr
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/emr"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@emr", func() {
- gucumber.World["client"] = emr.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature
deleted file mode 100644
index 133c174..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@emr @client @elasticmapreduce
-Feature: Amazon EMR
-
- Scenario: Making a request
- When I call the "ListClusters" API
- Then the value at "Clusters" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeCluster" API with:
- | ClusterId | fake_cluster |
- Then I expect the response error code to be "InvalidRequestException"
- And I expect the response error message to include:
- """
- Cluster id 'fake_cluster' is not valid.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go
deleted file mode 100644
index 33e59c4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package es provides gucumber integration tests support.
-package es
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/elasticsearchservice"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@es", func() {
- gucumber.World["client"] = elasticsearchservice.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature
deleted file mode 100644
index 8bd1f1e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@es @elasticsearchservice
-Feature: Amazon ElasticsearchService
-
- Scenario: Making a request
- When I call the "ListDomainNames" API
- Then the value at "DomainNames" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeElasticsearchDomain" API with:
- | DomainName | not-a-domain |
- Then the error code should be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- Domain not found: not-a-domain
- """
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go
deleted file mode 100644
index 26235ab..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package glacier provides gucumber integration tests support.
-package glacier
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/glacier"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@glacier", func() {
- gucumber.World["client"] = glacier.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature
deleted file mode 100644
index 0e1a113..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@glacier @client
-Feature: Amazon Glacier
-
- Scenario: Making a request
- When I call the "ListVaults" API
- Then the response should contain a "VaultList"
-
- Scenario: Handling errors
- When I attempt to call the "ListVaults" API with:
- | accountId | abcmnoxyz |
- Then I expect the response error code to be "UnrecognizedClientException"
- And I expect the response error message to include:
- """
- No account found for the given parameters
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go
deleted file mode 100644
index d551c73..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package iam provides gucumber integration tests support.
-package iam
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@iam", func() {
- gucumber.World["client"] = iam.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature
deleted file mode 100644
index 0da6463..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@iam @client
-Feature: AWS Identity and Access Management
-
- Scenario: Making a request
- When I call the "ListUsers" API
- Then the value at "Users" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetUser" API with:
- | UserName | fake_user |
- Then I expect the response error code to be "NoSuchEntity"
- And I expect the response error message to include:
- """
- The user with name fake_user cannot be found.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go
deleted file mode 100644
index 30921c3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build integration
-
-//Package iotdataplane provides gucumber integration tests support.
-package iotdataplane
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/iot"
- "github.com/aws/aws-sdk-go/service/iotdataplane"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@iotdataplane", func() {
- svc := iot.New(smoke.Session)
- result, err := svc.DescribeEndpoint(&iot.DescribeEndpointInput{})
- if err != nil {
- gucumber.World["error"] = err
- return
- }
-
- gucumber.World["client"] = iotdataplane.New(smoke.Session, aws.NewConfig().
- WithEndpoint(*result.EndpointAddress))
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature
deleted file mode 100644
index 515d9bb..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature
+++ /dev/null
@@ -1,9 +0,0 @@
-# language: en
-@iotdataplane @client
-
-Feature: AWS IoT Data Plane
-
- Scenario: Handling errors
- When I attempt to call the "GetThingShadow" API with:
- | thingName | fake-thing |
- Then I expect the response error code to be "ResourceNotFoundException"
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go
deleted file mode 100644
index 5081bfe..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package kinesis provides gucumber integration tests support.
-package kinesis
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/kinesis"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@kinesis", func() {
- gucumber.World["client"] = kinesis.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature
deleted file mode 100644
index 570505c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@kinesis @client
-Feature: AWS Kinesis
-
- Scenario: Making a request
- When I call the "ListStreams" API
- Then the value at "StreamNames" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeStream" API with:
- | StreamName | bogus-stream-name |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- Stream bogus-stream-name under account
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go
deleted file mode 100644
index e9498b3..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package kms provides gucumber integration tests support.
-package kms
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/kms"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@kms", func() {
- gucumber.World["client"] = kms.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature
deleted file mode 100644
index ee428ab..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature
+++ /dev/null
@@ -1,13 +0,0 @@
-# language: en
-@kms @client
-Feature: Amazon Key Management Service
-
- Scenario: Making a request
- When I call the "ListAliases" API
- Then the value at "Aliases" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetKeyPolicy" API with:
- | KeyId | fake-key |
- | PolicyName | fakepolicy |
- Then I expect the response error code to be "NotFoundException"
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go
deleted file mode 100644
index 257bc26..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package lambda provides gucumber integration tests support.
-package lambda
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/lambda"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@lambda", func() {
- gucumber.World["client"] = lambda.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature
deleted file mode 100644
index 6ff9cf4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@lambda @client
-Feature: Amazon Lambda
-
- Scenario: Making a request
- When I call the "ListFunctions" API
- Then the value at "Functions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "Invoke" API with:
- | FunctionName | bogus-function |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- Function not found
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go
deleted file mode 100644
index a8ba24c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package machinelearning provides gucumber integration tests support.
-package machinelearning
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/machinelearning"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@machinelearning", func() {
- gucumber.World["client"] = machinelearning.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature
deleted file mode 100644
index 2d9b064..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@machinelearning @client
-Feature: Amazon Machine Learning
-
- I want to use Amazon Machine Learning
-
- Scenario: Making a request
- When I call the "DescribeMLModels" API
- Then the value at "Results" should be a list
-
- Scenario: Error handling
- When I attempt to call the "GetBatchPrediction" API with:
- | BatchPredictionId | fake-id |
- Then the error code should be "ResourceNotFoundException"
- And the error message should contain:
- """
- No BatchPrediction with id fake-id exists
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go
deleted file mode 100644
index 8f3f537..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package opsworks provides gucumber integration tests support.
-package opsworks
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/opsworks"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@opsworks", func() {
- gucumber.World["client"] = opsworks.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature
deleted file mode 100644
index a9cfe52..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@opsworks @client
-Feature: AWS OpsWorks
-
- Scenario: Making a request
- When I call the "DescribeStacks" API
- Then the value at "Stacks" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeLayers" API with:
- | StackId | fake_stack |
- Then I expect the response error code to be "ResourceNotFoundException"
- And I expect the response error message to include:
- """
- Unable to find stack with ID fake_stack
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go
deleted file mode 100644
index a12c73b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package rds provides gucumber integration tests support.
-package rds
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/rds"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@rds", func() {
- gucumber.World["client"] = rds.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature
deleted file mode 100644
index 547d76d..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@rds @client
-Feature: Amazon RDS
-
- Scenario: Making a request
- When I call the "DescribeDBEngineVersions" API
- Then the value at "DBEngineVersions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeDBInstances" API with:
- | DBInstanceIdentifier | fake-id |
- Then I expect the response error code to be "DBInstanceNotFound"
- And I expect the response error message to include:
- """
- DBInstance fake-id not found.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go
deleted file mode 100644
index 9e6da95..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package redshift provides gucumber integration tests support.
-package redshift
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/redshift"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@redshift", func() {
- gucumber.World["client"] = redshift.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature
deleted file mode 100644
index 8cb45b1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@redshift @client
-Feature: Amazon Redshift
-
- Scenario: Making a request
- When I call the "DescribeClusterVersions" API
- Then the value at "ClusterVersions" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeClusters" API with:
- | ClusterIdentifier | fake-cluster |
- Then I expect the response error code to be "ClusterNotFound"
- And I expect the response error message to include:
- """
- Cluster fake-cluster not found.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go
deleted file mode 100644
index a55a14e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package route53 provides gucumber integration tests support.
-package route53
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/route53"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@route53", func() {
- gucumber.World["client"] = route53.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature
deleted file mode 100644
index 51463c5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@route53 @client
-Feature: Amazon Route 53
-
- Scenario: Making a request
- When I call the "ListHostedZones" API
- Then the value at "HostedZones" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetHostedZone" API with:
- | Id | fake-zone |
- Then I expect the response error code to be "NoSuchHostedZone"
- And I expect the response error message to include:
- """
- No hosted zone found with ID: fake-zone
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go
deleted file mode 100644
index c47de45..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package route53domains provides gucumber integration tests support.
-package route53domains
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/route53domains"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@route53domains", func() {
- gucumber.World["client"] = route53domains.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature
deleted file mode 100644
index f18dcc4..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@route53domains @client
-Feature: Amazon Route53 Domains
-
- Scenario: Making a request
- When I call the "ListDomains" API
- Then the value at "Domains" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetDomainDetail" API with:
- | DomainName | fake-domain-name |
- Then I expect the response error code to be "InvalidInput"
- And I expect the response error message to include:
- """
- domain name must contain more than 1 label
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go
deleted file mode 100644
index f81947a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package ses provides gucumber integration tests support.
-package ses
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/ses"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@ses", func() {
- gucumber.World["client"] = ses.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature
deleted file mode 100644
index 6b67fa7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@ses @email @client
-Feature: Amazon Simple Email Service
-
- Scenario: Making a request
- When I call the "ListIdentities" API
- Then the value at "Identities" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "VerifyEmailIdentity" API with:
- | EmailAddress | fake_email |
- Then I expect the response error code to be "InvalidParameterValue"
- And I expect the response error message to include:
- """
- Invalid email address.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go
deleted file mode 100644
index dbb1338..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// +build integration
-
-// Package smoke contains shared step definitions that are used across integration tests
-package smoke
-
-import (
- "encoding/json"
- "fmt"
- "os"
- "reflect"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/gucumber/gucumber"
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/session"
-)
-
-// Session is a shared session for all integration smoke tests to use.
-var Session = session.Must(session.NewSession())
-
-func init() {
- logLevel := Session.Config.LogLevel
- if os.Getenv("DEBUG") != "" {
- logLevel = aws.LogLevel(aws.LogDebug)
- }
- if os.Getenv("DEBUG_SIGNING") != "" {
- logLevel = aws.LogLevel(aws.LogDebugWithSigning)
- }
- if os.Getenv("DEBUG_BODY") != "" {
- logLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
- }
- Session.Config.LogLevel = logLevel
-
- gucumber.When(`^I call the "(.+?)" API$`, func(op string) {
- call(op, nil, false)
- })
-
- gucumber.When(`^I call the "(.+?)" API with:$`, func(op string, args [][]string) {
- call(op, args, false)
- })
-
- gucumber.Then(`^the value at "(.+?)" should be a list$`, func(member string) {
- vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
- assert.NotNil(gucumber.T, vals)
- })
-
- gucumber.Then(`^the response should contain a "(.+?)"$`, func(member string) {
- vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
- assert.NotEmpty(gucumber.T, vals)
- })
-
- gucumber.When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) {
- call(op, args, true)
- })
-
- gucumber.Then(`^I expect the response error code to be "(.+?)"$`, func(code string) {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- if ok {
- assert.Equal(gucumber.T, code, err.Code(), "Error: %v", err)
- }
- })
-
- gucumber.And(`^I expect the response error message to include:$`, func(data string) {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- if ok {
- assert.Contains(gucumber.T, err.Error(), data)
- }
- })
-
- gucumber.And(`^I expect the response error message to include one of:$`, func(table [][]string) {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- if ok {
- found := false
- for _, row := range table {
- if strings.Contains(err.Error(), row[0]) {
- found = true
- break
- }
- }
-
- assert.True(gucumber.T, found, fmt.Sprintf("no error messages matched: \"%s\"", err.Error()))
- }
- })
-
- gucumber.And(`^I expect the response error message not be empty$`, func() {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- assert.NotEmpty(gucumber.T, err.Message())
- })
-
- gucumber.When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
- callWithJSON(s1, data, false)
- })
-
- gucumber.When(`^I attempt to call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
- callWithJSON(s1, data, true)
- })
-
- gucumber.Then(`^the error code should be "(.+?)"$`, func(s1 string) {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- assert.Equal(gucumber.T, s1, err.Code())
- })
-
- gucumber.And(`^the error message should contain:$`, func(data string) {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- assert.Contains(gucumber.T, err.Error(), data)
- })
-
- gucumber.Then(`^the request should fail$`, func() {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.True(gucumber.T, ok, "no error returned")
- assert.Error(gucumber.T, err)
- })
-
- gucumber.Then(`^the request should be successful$`, func() {
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.False(gucumber.T, ok, "error returned")
- assert.NoError(gucumber.T, err)
- })
-}
-
-// findMethod finds the op operation on the v structure using a case-insensitive
-// lookup. Returns nil if no method is found.
-func findMethod(v reflect.Value, op string) *reflect.Value {
- t := v.Type()
- op = strings.ToLower(op)
- for i := 0; i < t.NumMethod(); i++ {
- name := t.Method(i).Name
- if strings.ToLower(name) == op {
- m := v.MethodByName(name)
- return &m
- }
- }
- return nil
-}
-
-// call calls an operation on gucumber.World["client"] by the name op using the args
-// table of arguments to set.
-func call(op string, args [][]string, allowError bool) {
- v := reflect.ValueOf(gucumber.World["client"])
- if m := findMethod(v, op); m != nil {
- t := m.Type()
- in := reflect.New(t.In(0).Elem())
- fillArgs(in, args)
-
- resps := m.Call([]reflect.Value{in})
- gucumber.World["response"] = resps[0].Interface()
- gucumber.World["error"] = resps[1].Interface()
-
- if !allowError {
- err, _ := gucumber.World["error"].(error)
- assert.NoError(gucumber.T, err)
- }
- } else {
- assert.Fail(gucumber.T, "failed to find operation "+op)
- }
-}
-
-// reIsNum is a regular expression matching a numeric input (integer)
-var reIsNum = regexp.MustCompile(`^\d+$`)
-
-// reIsArray is a regular expression matching a list
-var reIsArray = regexp.MustCompile(`^\['.*?'\]$`)
-var reArrayElem = regexp.MustCompile(`'(.+?)'`)
-
-// fillArgs fills arguments on the input structure using the args table of
-// arguments.
-func fillArgs(in reflect.Value, args [][]string) {
- if args == nil {
- return
- }
-
- for _, row := range args {
- path := row[0]
- var val interface{} = row[1]
- if reIsArray.MatchString(row[1]) {
- quotedStrs := reArrayElem.FindAllString(row[1], -1)
- strs := make([]*string, len(quotedStrs))
- for i, e := range quotedStrs {
- str := e[1 : len(e)-1]
- strs[i] = &str
- }
- val = strs
- } else if reIsNum.MatchString(row[1]) { // handle integer values
- num, err := strconv.ParseInt(row[1], 10, 64)
- if err == nil {
- val = num
- }
- }
- awsutil.SetValueAtPath(in.Interface(), path, val)
- }
-}
-
-func callWithJSON(op, j string, allowError bool) {
- v := reflect.ValueOf(gucumber.World["client"])
- if m := findMethod(v, op); m != nil {
- t := m.Type()
- in := reflect.New(t.In(0).Elem())
- fillJSON(in, j)
-
- resps := m.Call([]reflect.Value{in})
- gucumber.World["response"] = resps[0].Interface()
- gucumber.World["error"] = resps[1].Interface()
-
- if !allowError {
- err, _ := gucumber.World["error"].(error)
- assert.NoError(gucumber.T, err)
- }
- } else {
- assert.Fail(gucumber.T, "failed to find operation "+op)
- }
-}
-
-func fillJSON(in reflect.Value, j string) {
- d := json.NewDecoder(strings.NewReader(j))
- if err := d.Decode(in.Interface()); err != nil {
- panic(err)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go
deleted file mode 100644
index e8d9ec2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package simpledb provides gucumber integration tests support.
-package simpledb
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/simpledb"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@simpledb", func() {
- gucumber.World["client"] = simpledb.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature
deleted file mode 100644
index ddc03d8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature
+++ /dev/null
@@ -1,24 +0,0 @@
-# language: en
-@simpledb @sdb
-Feature: Amazon SimpleDB
-
- I want to use Amazon SimpleDB
-
- Scenario: Making a request
- When I call the "CreateDomain" API with:
- | DomainName | sample-domain |
- Then the request should be successful
- And I call the "ListDomains" API
- Then the value at "DomainNames" should be a list
- And I call the "DeleteDomain" API with:
- | DomainName | sample-domain |
- Then the request should be successful
-
- Scenario: Handling errors
- When I attempt to call the "CreateDomain" API with:
- | DomainName | |
- Then I expect the response error code to be "InvalidParameterValue"
- And I expect the response error message to include:
- """
- DomainName is invalid
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go
deleted file mode 100644
index cbf990c..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package sns provides gucumber integration tests support.
-package sns
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/sns"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@sns", func() {
- gucumber.World["client"] = sns.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature
deleted file mode 100644
index 76f6a16..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature
+++ /dev/null
@@ -1,14 +0,0 @@
-# language: en
-@sns @client
-Feature: Amazon Simple Notification Service
-
- Scenario: Making a request
- When I call the "ListTopics" API
- Then the value at "Topics" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "Publish" API with:
- | Message | hello |
- | TopicArn | fake_topic |
- Then I expect the response error code to be "InvalidParameter"
- And I expect the response error message not be empty
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go
deleted file mode 100644
index 884dbbd..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package sqs provides gucumber integration tests support.
-package sqs
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/sqs"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@sqs", func() {
- gucumber.World["client"] = sqs.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature
deleted file mode 100644
index 1413820..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@sqs @client
-Feature: Amazon Simple Queue Service
-
- Scenario: Making a request
- When I call the "ListQueues" API
- Then the value at "QueueUrls" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetQueueUrl" API with:
- | QueueName | fake_queue |
- Then I expect the response error code to be "AWS.SimpleQueueService.NonExistentQueue"
- And I expect the response error message to include:
- """
- The specified queue does not exist for this wsdl version.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go
deleted file mode 100644
index af5e2aa..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package ssm provides gucumber integration tests support.
-package ssm
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/ssm"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@ssm", func() {
- gucumber.World["client"] = ssm.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature
deleted file mode 100644
index 3e2230e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@ssm @client
-Feature: Amazon SSM
-
- Scenario: Making a request
- When I call the "ListDocuments" API
- Then the value at "DocumentIdentifiers" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "GetDocument" API with:
- | Name | 'fake-name' |
- Then I expect the response error code to be "ValidationException"
- And I expect the response error message to include:
- """
- validation error detected
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go
deleted file mode 100644
index 44d3731..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package storagegateway provides gucumber integration tests support.
-package storagegateway
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/storagegateway"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@storagegateway", func() {
- gucumber.World["client"] = storagegateway.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature
deleted file mode 100644
index ef96eed..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature
+++ /dev/null
@@ -1,16 +0,0 @@
-# language: en
-@storagegateway @client
-Feature: AWS Storage Gateway
-
- Scenario: Making a request
- When I call the "ListGateways" API
- Then the value at "Gateways" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "ListVolumes" API with:
- | GatewayARN | fake_gateway |
- Then I expect the response error code to be "InvalidParameter"
- And I expect the response error message to include:
- """
- GatewayARN
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go
deleted file mode 100644
index ed61e1b..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package sts provides gucumber integration tests support.
-package sts
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@sts", func() {
- gucumber.World["client"] = sts.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature
deleted file mode 100644
index 9caf1fa..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@sts @client
-Feature: AWS STS
-
- Scenario: Making a request
- When I call the "GetSessionToken" API
- Then the response should contain a "Credentials"
-
- Scenario: Handling errors
- When I attempt to call the "GetFederationToken" API with:
- | Name | temp |
- | Policy | |
- Then I expect the response error code to be "InvalidParameter"
- And I expect the response error message to include:
- """
- Policy
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go
deleted file mode 100644
index 9322d57..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package support provides gucumber integration tests support.
-package support
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/support"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@support", func() {
- gucumber.World["client"] = support.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature
deleted file mode 100644
index 2f91ff8..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature
+++ /dev/null
@@ -1,22 +0,0 @@
-# language: en
-@support @client
-Feature: AWS Support
-
- I want to use AWS Support
-
- Scenario: Making a request
- When I call the "DescribeServices" API
- Then the value at "services" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "CreateCase" API with:
- | subject | subject |
- | communicationBody | communication |
- | categoryCode | category |
- | serviceCode | amazon-dynamodb |
- | severityCode | low |
- Then I expect the response error code to be "InvalidParameterValueException"
- And the error message should contain:
- """
- Invalid category code
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go
deleted file mode 100644
index 09020a2..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package swf provides gucumber integration tests support.
-package swf
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/swf"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@swf", func() {
- gucumber.World["client"] = swf.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature
deleted file mode 100644
index 1349c81..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@swf @client
-Feature: Amazon Simple Workflow Service
-
- Scenario: Making a request
- When I call the "ListDomains" API with:
- | registrationStatus | REGISTERED |
- Then the value at "domainInfos" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeDomain" API with:
- | name | fake_domain |
- Then I expect the response error code to be "UnknownResourceFault"
- And I expect the response error message to include:
- """
- Unknown domain: fake_domain
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go
deleted file mode 100644
index 898f848..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package waf provides gucumber integration tests support.
-package waf
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/waf"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@waf", func() {
- gucumber.World["client"] = waf.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature
deleted file mode 100644
index bf76fb6..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature
+++ /dev/null
@@ -1,20 +0,0 @@
-# language: en
-@waf
-Feature: AWS WAF
-
- Scenario: Making a request
- When I call the "ListRules" API with JSON:
- """
- {"Limit":20}
- """
- Then the value at "Rules" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "CreateSqlInjectionMatchSet" API with:
- | Name | fake_name |
- | ChangeToken | fake_token |
- Then I expect the response error code to be "WAFStaleDataException"
- And I expect the response error message to include:
- """
- The input token is no longer current
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go
deleted file mode 100644
index 320fb1a..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build integration
-
-//Package workspaces provides gucumber integration tests support.
-package workspaces
-
-import (
- "github.com/aws/aws-sdk-go/awstesting/integration/smoke"
- "github.com/aws/aws-sdk-go/service/workspaces"
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@workspaces", func() {
- gucumber.World["client"] = workspaces.New(smoke.Session)
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature
deleted file mode 100644
index 09ca884..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature
+++ /dev/null
@@ -1,18 +0,0 @@
-# language: en
-@workspaces @client
-Feature: Amazon WorkSpaces
-
- I want to use Amazon WorkSpaces
-
- Scenario: Making a request
- When I call the "DescribeWorkspaces" API
- Then the value at "Workspaces" should be a list
-
- Scenario: Handling errors
- When I attempt to call the "DescribeWorkspaces" API with:
- | DirectoryId | fake-id |
- Then I expect the response error code to be "ValidationException"
- And I expect the response error message to include:
- """
- The Directory ID fake-id in the request is invalid.
- """
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/mock/mock.go b/vendor/github.com/aws/aws-sdk-go/awstesting/mock/mock.go
deleted file mode 100644
index 9f18007..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/mock/mock.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package mock
-
-import (
- "net/http"
- "net/http/httptest"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/session"
-)
-
-// Session is a mock session which is used to hit the mock server
-var Session = session.Must(session.NewSession(&aws.Config{
- DisableSSL: aws.Bool(true),
- Endpoint: aws.String(server.URL[7:]),
-}))
-
-// server is the mock server that simply writes a 200 status back to the client
-var server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
-}))
-
-// NewMockClient creates and initializes a client that will connect to the
-// mock server
-func NewMockClient(cfgs ...*aws.Config) *client.Client {
- c := Session.ClientConfig("Mock", cfgs...)
-
- svc := client.New(
- *c.Config,
- metadata.ClientInfo{
- ServiceName: "Mock",
- SigningRegion: c.SigningRegion,
- Endpoint: c.Endpoint,
- APIVersion: "2015-12-08",
- JSONVersion: "1.1",
- TargetPrefix: "MockServer",
- },
- c.Handlers,
- )
-
- return svc
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go
deleted file mode 100644
index de13658..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// +build integration
-
-package performance
-
-import (
- "errors"
- "fmt"
- "os"
- "reflect"
- "runtime"
- "strings"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/awstesting/mock"
- "github.com/gucumber/gucumber"
-)
-
-// mapCreateClients allows for the creation of clients
-func mapCreateClients() {
- clientFns := []func(){}
- for _, c := range clients {
- clientFns = append(clientFns, func() { c.Call([]reflect.Value{reflect.ValueOf(mock.Session)}) })
- }
-
- gucumber.World["services"] = clientFns
-}
-
-func buildAnArrayOfClients() {
- methods := []reflect.Value{}
- params := [][]reflect.Value{}
-
- for _, c := range clients {
- method, param, err := findAndGetMethod(c.Call([]reflect.Value{reflect.ValueOf(mock.Session)}))
- if err == nil {
- methods = append(methods, method)
- params = append(params, param)
- }
- }
-
- fns := []func(){}
- for i := 0; i < len(methods); i++ {
- m := methods[i]
- p := params[i]
- f := func() {
- reqs := m.Call(p)
- resp := reqs[0].Interface().(*request.Request).Send()
- fmt.Println(resp)
- }
- fns = append(fns, f)
- }
- gucumber.World["clientFns"] = fns
-}
-
-// findAndGetMethod will grab the method, params to be passed to the method, and an error.
-// The method that is found, is a method that doesn't have any required input
-func findAndGetMethod(client interface{}) (reflect.Value, []reflect.Value, error) {
- v := reflect.ValueOf(client).Type()
- n := v.NumMethod()
-
-outer:
- for i := 0; i < n; i++ {
- method := v.Method(i)
- if method.Type.NumIn() != 2 || strings.HasSuffix(method.Name, "Request") {
- continue
- }
- param := reflect.New(method.Type.In(1).Elem())
- for j := 0; j < param.Elem().NumField(); j++ {
- field := param.Elem().Type().Field(j)
- req := field.Tag.Get("required")
-
- if req == "true" {
- continue outer
- }
- }
-
- params := []reflect.Value{reflect.ValueOf(client), param}
- return method.Func, params, nil
- }
-
- return reflect.Value{}, nil, errors.New("No method found")
-}
-
-// benchmarkTask takes a unique key to write to the logger with the benchmark
-// result's data
-func benchmarkTask(key string, fns []func(), i1 int) error {
- gucumber.World["error"] = nil
- memStatStart := &runtime.MemStats{}
- runtime.ReadMemStats(memStatStart)
-
- results := testing.Benchmark(func(b *testing.B) {
- for _, f := range fns {
- for i := 0; i < i1; i++ {
- f()
- }
- }
- })
-
- results.N = i1
- memStatEnd := &runtime.MemStats{}
- runtime.ReadMemStats(memStatEnd)
- l, err := newBenchmarkLogger("stdout")
- if err != nil {
- return err
- }
- l.log(key, results)
-
- toDynamodb := os.Getenv("AWS_TESTING_LOG_RESULTS") == "true"
- if toDynamodb {
- l, err := newBenchmarkLogger("dynamodb")
- if err != nil {
- return err
- }
- l.log(key+"_start_benchmarks", memStatStart)
- l.log(key+"_end_benchmarks", memStatEnd)
- }
-
- if memStatStart.Alloc < memStatEnd.Alloc {
- return errors.New("Leaked memory")
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go
deleted file mode 100644
index 00c2e81..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build integration
-
-//Package performance provides gucumber integration tests support.
-package performance
-
-import (
- "github.com/gucumber/gucumber"
-)
-
-func init() {
- gucumber.Before("@performance", func() {
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature
deleted file mode 100644
index c248329..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature
+++ /dev/null
@@ -1,17 +0,0 @@
-# language: en
-@performance @clients
-Feature: Client Performance
- Background:
- Given I have loaded my SDK and its dependencies
- And I have a list of services
- And I take a snapshot of my resources
-
- Scenario: Creating and then cleaning up clients doesn't leak resources
- When I create and discard 100 clients for each service
- Then I should not have leaked any resources
-
- Scenario: Sending requests doesn't leak resources
- When I create a client for each service
- And I execute 100 command(s) on each client
- And I destroy all the clients
- Then I should not have leaked any resources
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go
deleted file mode 100644
index 6baa444..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// +build integration
-
-package performance
-
-import (
- "reflect"
-
- "github.com/aws/aws-sdk-go/service/acm"
- "github.com/aws/aws-sdk-go/service/apigateway"
- "github.com/aws/aws-sdk-go/service/autoscaling"
- "github.com/aws/aws-sdk-go/service/cloudformation"
- "github.com/aws/aws-sdk-go/service/cloudfront"
- "github.com/aws/aws-sdk-go/service/cloudhsm"
- "github.com/aws/aws-sdk-go/service/cloudsearch"
- "github.com/aws/aws-sdk-go/service/cloudsearchdomain"
- "github.com/aws/aws-sdk-go/service/cloudtrail"
- "github.com/aws/aws-sdk-go/service/cloudwatch"
- "github.com/aws/aws-sdk-go/service/cloudwatchevents"
- "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
- "github.com/aws/aws-sdk-go/service/codecommit"
- "github.com/aws/aws-sdk-go/service/codedeploy"
- "github.com/aws/aws-sdk-go/service/codepipeline"
- "github.com/aws/aws-sdk-go/service/cognitoidentity"
- "github.com/aws/aws-sdk-go/service/cognitosync"
- "github.com/aws/aws-sdk-go/service/configservice"
- "github.com/aws/aws-sdk-go/service/datapipeline"
- "github.com/aws/aws-sdk-go/service/devicefarm"
- "github.com/aws/aws-sdk-go/service/directconnect"
- "github.com/aws/aws-sdk-go/service/directoryservice"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/aws/aws-sdk-go/service/ecr"
- "github.com/aws/aws-sdk-go/service/ecs"
- "github.com/aws/aws-sdk-go/service/efs"
- "github.com/aws/aws-sdk-go/service/elasticache"
- "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
- "github.com/aws/aws-sdk-go/service/elasticsearchservice"
- "github.com/aws/aws-sdk-go/service/elastictranscoder"
- "github.com/aws/aws-sdk-go/service/elb"
- "github.com/aws/aws-sdk-go/service/emr"
- "github.com/aws/aws-sdk-go/service/firehose"
- "github.com/aws/aws-sdk-go/service/glacier"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/aws/aws-sdk-go/service/inspector"
- "github.com/aws/aws-sdk-go/service/iot"
- "github.com/aws/aws-sdk-go/service/iotdataplane"
- "github.com/aws/aws-sdk-go/service/kinesis"
- "github.com/aws/aws-sdk-go/service/kms"
- "github.com/aws/aws-sdk-go/service/lambda"
- "github.com/aws/aws-sdk-go/service/machinelearning"
- "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics"
- "github.com/aws/aws-sdk-go/service/mobileanalytics"
- "github.com/aws/aws-sdk-go/service/opsworks"
- "github.com/aws/aws-sdk-go/service/rds"
- "github.com/aws/aws-sdk-go/service/redshift"
- "github.com/aws/aws-sdk-go/service/route53"
- "github.com/aws/aws-sdk-go/service/route53domains"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/ses"
- "github.com/aws/aws-sdk-go/service/simpledb"
- "github.com/aws/aws-sdk-go/service/sns"
- "github.com/aws/aws-sdk-go/service/sqs"
- "github.com/aws/aws-sdk-go/service/ssm"
- "github.com/aws/aws-sdk-go/service/storagegateway"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/aws/aws-sdk-go/service/support"
- "github.com/aws/aws-sdk-go/service/swf"
- "github.com/aws/aws-sdk-go/service/waf"
- "github.com/aws/aws-sdk-go/service/workspaces"
-)
-
-var clients = []reflect.Value{
- reflect.ValueOf(acm.New),
- reflect.ValueOf(apigateway.New),
- reflect.ValueOf(autoscaling.New),
- reflect.ValueOf(cloudformation.New),
- reflect.ValueOf(cloudfront.New),
- reflect.ValueOf(cloudhsm.New),
- reflect.ValueOf(cloudsearch.New),
- reflect.ValueOf(cloudsearchdomain.New),
- reflect.ValueOf(cloudtrail.New),
- reflect.ValueOf(cloudwatch.New),
- reflect.ValueOf(cloudwatchevents.New),
- reflect.ValueOf(cloudwatchlogs.New),
- reflect.ValueOf(codecommit.New),
- reflect.ValueOf(codedeploy.New),
- reflect.ValueOf(codepipeline.New),
- reflect.ValueOf(cognitoidentity.New),
- reflect.ValueOf(cognitosync.New),
- reflect.ValueOf(configservice.New),
- reflect.ValueOf(datapipeline.New),
- reflect.ValueOf(devicefarm.New),
- reflect.ValueOf(directconnect.New),
- reflect.ValueOf(directoryservice.New),
- reflect.ValueOf(dynamodb.New),
- reflect.ValueOf(dynamodbstreams.New),
- reflect.ValueOf(ec2.New),
- reflect.ValueOf(ecr.New),
- reflect.ValueOf(ecs.New),
- reflect.ValueOf(efs.New),
- reflect.ValueOf(elasticache.New),
- reflect.ValueOf(elasticbeanstalk.New),
- reflect.ValueOf(elasticsearchservice.New),
- reflect.ValueOf(elastictranscoder.New),
- reflect.ValueOf(elb.New),
- reflect.ValueOf(emr.New),
- reflect.ValueOf(firehose.New),
- reflect.ValueOf(glacier.New),
- reflect.ValueOf(iam.New),
- reflect.ValueOf(inspector.New),
- reflect.ValueOf(iot.New),
- reflect.ValueOf(iotdataplane.New),
- reflect.ValueOf(kinesis.New),
- reflect.ValueOf(kms.New),
- reflect.ValueOf(lambda.New),
- reflect.ValueOf(machinelearning.New),
- reflect.ValueOf(marketplacecommerceanalytics.New),
- reflect.ValueOf(mobileanalytics.New),
- reflect.ValueOf(opsworks.New),
- reflect.ValueOf(rds.New),
- reflect.ValueOf(redshift.New),
- reflect.ValueOf(route53.New),
- reflect.ValueOf(route53domains.New),
- reflect.ValueOf(s3.New),
- reflect.ValueOf(ses.New),
- reflect.ValueOf(simpledb.New),
- reflect.ValueOf(sns.New),
- reflect.ValueOf(sqs.New),
- reflect.ValueOf(ssm.New),
- reflect.ValueOf(storagegateway.New),
- reflect.ValueOf(sts.New),
- reflect.ValueOf(support.New),
- reflect.ValueOf(swf.New),
- reflect.ValueOf(waf.New),
- reflect.ValueOf(workspaces.New),
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go
deleted file mode 100644
index 81596d1..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// +build integration
-
-package performance
-
-import (
- "bytes"
- "errors"
- "fmt"
- "runtime"
-
- "github.com/gucumber/gucumber"
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/awstesting/mock"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-func init() {
- // Go loads all of its dependecies on compile
- gucumber.Given(`^I have loaded my SDK and its dependencies$`, func() {
- })
-
- // Performance
- gucumber.When(`^I create and discard (\d+) clients for each service$`, func(i1 int) {
- services := gucumber.World["services"].([]func())
- err := benchmarkTask(fmt.Sprintf("%d_create_and_discard_clients", i1), services, i1)
- gucumber.World["error"] = err
- })
-
- gucumber.Then(`^I should not have leaked any resources$`, func() {
- runtime.GC()
- err, ok := gucumber.World["error"].(awserr.Error)
- assert.False(gucumber.T, ok, "error returned")
- assert.NoError(gucumber.T, err)
- })
-
- gucumber.And(`^I have a list of services$`, func() {
- mapCreateClients()
- })
-
- gucumber.And(`^I take a snapshot of my resources$`, func() {
- // Can't take a memory snapshot here, because gucumber does some
- // allocation between each instruction leading to unreliable numbers
- })
-
- gucumber.When(`^I create a client for each service$`, func() {
- buildAnArrayOfClients()
- })
-
- gucumber.And("^I execute (\\d+) command\\(s\\) on each client$", func(i1 int) {
- clientFns := gucumber.World["clientFns"].([]func())
- err := benchmarkTask(fmt.Sprintf("%d_commands_on_clients", i1), clientFns, i1)
- gucumber.World["error"] = err
- })
-
- gucumber.And(`^I destroy all the clients$`, func() {
- delete(gucumber.World, "clientFns")
- runtime.GC()
- })
-
- gucumber.Given(`^I have a (\d+) byte file$`, func(i1 int) {
- gucumber.World["file"] = make([]byte, i1)
- })
-
- gucumber.When(`^I upload the file$`, func() {
- svc := s3.New(mock.Session)
- memStatStart := &runtime.MemStats{}
- runtime.ReadMemStats(memStatStart)
- gucumber.World["start"] = memStatStart
-
- svc.PutObjectRequest(&s3.PutObjectInput{
- Bucket: aws.String("bucketmesilly"),
- Key: aws.String("testKey"),
- Body: bytes.NewReader(gucumber.World["file"].([]byte)),
- })
- })
-
- gucumber.And(`then download the file$`, func() {
- svc := s3.New(mock.Session)
- svc.GetObjectRequest(&s3.GetObjectInput{
- Bucket: aws.String("bucketmesilly"),
- Key: aws.String("testKey"),
- })
- memStatEnd := &runtime.MemStats{}
- runtime.ReadMemStats(memStatEnd)
- memStatStart := gucumber.World["start"].(*runtime.MemStats)
- if memStatStart.Alloc < memStatEnd.Alloc {
- gucumber.World["error"] = errors.New("Leaked memory")
- }
- })
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go
deleted file mode 100644
index 03c885f..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// +build integration
-
-// Package performance contains shared step definitions that are used for performance testing
-package performance
-
-import (
- "errors"
- "fmt"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/awstesting/unit"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
-)
-
-// benchmarkLogger handles all benchmark logging
-type benchmarkLogger struct {
- outputer
-}
-
-// logger interface that handles any logging to an output
-type logger interface {
- log(key string, data map[string]interface{}) error
-}
-
-// init initializes the logger and uses dependency injection for the
-// outputer
-func newBenchmarkLogger(output string) (*benchmarkLogger, error) {
- b := &benchmarkLogger{}
- switch output {
- case "dynamodb":
- region := os.Getenv("AWS_TESTING_REGION")
- if region == "" {
- return b, errors.New("No region specified. Please export AWS_TESTING_REGION")
- }
-
- table := os.Getenv("AWS_TESTING_DB_TABLE")
- if table == "" {
- return b, errors.New("No table specified. Please export AWS_TESTING_DB_TABLE")
- }
- b.outputer = newDynamodbOut(table, region)
- case "stdout":
- b.outputer = stdout{}
- default:
- return b, errors.New("Unsupported outputer")
- }
- return b, nil
-}
-
-type record struct {
- Key string
- Data interface{}
-}
-
-// log calls the output command and building a data structure
-// to pass into its output formatter
-func (b benchmarkLogger) log(key, data interface{}) error {
- formatData := record{
- Key: fmt.Sprintf("%d-%v", time.Now().Unix(), key.(string)),
- Data: data,
- }
-
- return b.output(formatData)
-}
-
-// outputer is a simple interface that'll handle output
-// to whatever system like dynamodb or stdout
-type outputer interface {
- output(record) error
-}
-
-// dyanmodbOut handles simple writes to dynamodb
-type dynamodbOut struct {
- table string // table to write to in dynamodb
- region string
- db *dynamodb.DynamoDB // the dynamodb
-}
-
-// init initializes dynamodbOut
-func newDynamodbOut(table, region string) *dynamodbOut {
- out := dynamodbOut{
- table: table,
- region: region,
- }
-
- out.db = dynamodb.New(
- unit.Session,
- &aws.Config{Region: &out.region},
- )
- return &out
-}
-
-// output just writes to dynamodb
-func (out dynamodbOut) output(data record) error {
- input := &dynamodb.PutItemInput{
- TableName: aws.String(out.table),
- }
-
- item, err := dynamodbattribute.ConvertToMap(data)
- if err != nil {
- return err
- }
-
- input.Item = item
- _, err = out.db.PutItem(input)
- return err
-}
-
-// stdout handles writes to stdout
-type stdout struct{}
-
-// output expects key value data to print to stdout
-func (out stdout) output(data record) error {
- item, err := dynamodbattribute.ConvertToMap(data.Data)
- if err != nil {
- return err
- }
- fmt.Println(item)
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature
deleted file mode 100644
index cd24cb7..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature
+++ /dev/null
@@ -1,26 +0,0 @@
-# language: en
-@performance @streaming
-Feature: Streaming transfers consume a fixed amount of memory
-
- Scenario Outline: Streaming uploads are O(1) in memory usage
- Given I have a <bytes> byte file
- And I take a snapshot of my resources
- When I upload the file
- Then I should not have leaked any resources
-
- Examples:
- | bytes |
- | 2097152 |
- | 209715200 |
-
- Scenario Outline: Streaming download are O(1) in memory usage
- Given I have a <bytes> byte file
- And I take a snapshot of my resources
- When I upload the file
- And then download the file
- Then I should not have leaked any resources
-
- Examples:
- | bytes |
- | 2097152 |
- | 209715200 |
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip
deleted file mode 100644
index 70148d5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip
+++ /dev/null
@@ -1,42 +0,0 @@
-# Based on docker-library's golang 1.6 alpine and wheezy docker files.
-# https://github.com/docker-library/golang/blob/master/1.6/alpine/Dockerfile
-# https://github.com/docker-library/golang/blob/master/1.6/wheezy/Dockerfile
-FROM buildpack-deps:wheezy-scm
-
-ENV GOLANG_VERSION tip
-ENV GOLANG_SRC_REPO_URL https://go.googlesource.com/go
-
-ENV GOLANG_BOOTSTRAP_URL https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz
-ENV GOLANG_BOOTSTRAP_SHA256 ce3140662f45356eb78bc16a88fc7cfb29fb00e18d7c632608245b789b2086d2
-ENV GOLANG_BOOTSTRAP_PATH /usr/local/bootstrap
-
-# gcc for cgo
-RUN apt-get update && apt-get install -y --no-install-recommends \
- g++ \
- gcc \
- libc6-dev \
- make \
- git \
- && rm -rf /var/lib/apt/lists/*
-
-# Setup the Bootstrap
-RUN mkdir -p "$GOLANG_BOOTSTRAP_PATH" \
- && curl -fsSL "$GOLANG_BOOTSTRAP_URL" -o golang.tar.gz \
- && echo "$GOLANG_BOOTSTRAP_SHA256 golang.tar.gz" | sha256sum -c - \
- && tar -C "$GOLANG_BOOTSTRAP_PATH" -xzf golang.tar.gz \
- && rm golang.tar.gz
-
-# Get and build Go tip
-RUN export GOROOT_BOOTSTRAP=$GOLANG_BOOTSTRAP_PATH/go \
- && git clone "$GOLANG_SRC_REPO_URL" /usr/local/go \
- && cd /usr/local/go/src \
- && ./make.bash \
- && rm -rf "$GOLANG_BOOTSTRAP_PATH" /usr/local/go/pkg/bootstrap
-
-# Build Go workspace and environment
-ENV GOPATH /go
-ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
-RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" \
- && chmod -R 777 "$GOPATH"
-
-WORKDIR $GOPATH
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4
deleted file mode 100644
index e048ed5..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:12.04
-FROM golang:1.4
-
-ADD . /go/src/github.com/aws/aws-sdk-go
-
-WORKDIR /go/src/github.com/aws/aws-sdk-go
-CMD ["make", "get-deps", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp
deleted file mode 100644
index 9ec9f16..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:12.04
-FROM golang:1.5
-
-ADD . /go/src/github.com/aws/aws-sdk-go
-
-WORKDIR /go/src/github.com/aws/aws-sdk-go
-CMD ["make", "get-deps", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7
deleted file mode 100644
index aed4408..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:12.04
-FROM golang:1.7
-
-ADD . /go/src/github.com/aws/aws-sdk-go
-
-WORKDIR /go/src/github.com/aws/aws-sdk-go
-CMD ["make", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip
deleted file mode 100644
index 9758279..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ubuntu:12.04
-FROM aws-golang:tip
-
-ADD . /go/src/github.com/aws/aws-sdk-go
-
-WORKDIR /go/src/github.com/aws/aws-sdk-go
-CMD ["make", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go b/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go
deleted file mode 100644
index 1c6e605..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package unit performs initialization and validation for unit tests
-package unit
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
-)
-
-// Session is a shared session for unit tests to use.
-var Session = session.Must(session.NewSession(aws.NewConfig().
- WithCredentials(credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")).
- WithRegion("mock-region")))
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/util.go b/vendor/github.com/aws/aws-sdk-go/awstesting/util.go
deleted file mode 100644
index 77c296e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/util.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package awstesting
-
-import (
- "io"
-
- "github.com/aws/aws-sdk-go/private/util"
-)
-
-// ZeroReader is a io.Reader which will always write zeros to the byte slice provided.
-type ZeroReader struct{}
-
-// Read fills the provided byte slice with zeros returning the number of bytes written.
-func (r *ZeroReader) Read(b []byte) (int, error) {
- for i := 0; i < len(b); i++ {
- b[i] = 0
- }
- return len(b), nil
-}
-
-// ReadCloser is a io.ReadCloser for unit testing.
-// Designed to test for leaks and whether a handle has
-// been closed
-type ReadCloser struct {
- Size int
- Closed bool
- set bool
- FillData func(bool, []byte, int, int)
-}
-
-// Read will call FillData and fill it with whatever data needed.
-// Decrements the size until zero, then return io.EOF.
-func (r *ReadCloser) Read(b []byte) (int, error) {
- if r.Closed {
- return 0, io.EOF
- }
-
- delta := len(b)
- if delta > r.Size {
- delta = r.Size
- }
- r.Size -= delta
-
- for i := 0; i < delta; i++ {
- b[i] = 'a'
- }
-
- if r.FillData != nil {
- r.FillData(r.set, b, r.Size, delta)
- }
- r.set = true
-
- if r.Size > 0 {
- return delta, nil
- }
- return delta, io.EOF
-}
-
-// Close sets Closed to true and returns no error
-func (r *ReadCloser) Close() error {
- r.Closed = true
- return nil
-}
-
-// SortedKeys returns a sorted slice of keys of a map.
-func SortedKeys(m map[string]interface{}) []string {
- return util.SortedKeys(m)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
deleted file mode 100644
index 4b03db0..0000000
--- a/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package awstesting_test
-
-import (
- "io"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/aws/aws-sdk-go/awstesting"
-)
-
-func TestReadCloserClose(t *testing.T) {
- rc := awstesting.ReadCloser{Size: 1}
- err := rc.Close()
-
- assert.Nil(t, err)
- assert.True(t, rc.Closed)
- assert.Equal(t, rc.Size, 1)
-}
-
-func TestReadCloserRead(t *testing.T) {
- rc := awstesting.ReadCloser{Size: 5}
- b := make([]byte, 2)
-
- n, err := rc.Read(b)
-
- assert.Nil(t, err)
- assert.Equal(t, n, 2)
- assert.False(t, rc.Closed)
- assert.Equal(t, rc.Size, 3)
-
- err = rc.Close()
- assert.Nil(t, err)
- n, err = rc.Read(b)
- assert.Equal(t, err, io.EOF)
- assert.Equal(t, n, 0)
-}
-
-func TestReadCloserReadAll(t *testing.T) {
- rc := awstesting.ReadCloser{Size: 5}
- b := make([]byte, 5)
-
- n, err := rc.Read(b)
-
- assert.Equal(t, err, io.EOF)
- assert.Equal(t, n, 5)
- assert.False(t, rc.Closed)
- assert.Equal(t, rc.Size, 0)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/callgraph.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/callgraph.html
deleted file mode 100644
index c56b2ef..0000000
--- a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/callgraph.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
- ▹ Internal call graph
-
- ▾ Internal call graph
-
- This viewer shows the portion of the internal call
- graph of this package that is reachable from this function.
- See the package's call
- graph for more information.
-
- ').appendTo(tocRow).append(dl2);
-}
-
-function bindToggle(el) {
- $('.toggleButton', el).click(function() {
- if ($(el).is('.toggle')) {
- $(el).addClass('toggleVisible').removeClass('toggle');
- } else {
- $(el).addClass('toggle').removeClass('toggleVisible');
- }
- });
-}
-function bindToggles(selector) {
- $(selector).each(function(i, el) {
- bindToggle(el);
- });
-}
-
-function bindToggleLink(el, prefix) {
- $(el).click(function() {
- var href = $(el).attr('href');
- var i = href.indexOf('#'+prefix);
- if (i < 0) {
- return;
- }
- var id = '#' + prefix + href.slice(i+1+prefix.length);
- if ($(id).is('.toggle')) {
- $(id).find('.toggleButton').first().click();
- }
- });
-}
-function bindToggleLinks(selector, prefix) {
- $(selector).each(function(i, el) {
- bindToggleLink(el, prefix);
- });
-}
-
-function setupDropdownPlayground() {
- if (!$('#page').is('.wide')) {
- return; // don't show on front page
- }
- var button = $('#playgroundButton');
- var div = $('#playground');
- var setup = false;
- button.toggle(function() {
- button.addClass('active');
- div.show();
- if (setup) {
- return;
- }
- setup = true;
- playground({
- 'codeEl': $('.code', div),
- 'outputEl': $('.output', div),
- 'runEl': $('.run', div),
- 'fmtEl': $('.fmt', div),
- 'shareEl': $('.share', div),
- 'shareRedirect': '//play.golang.org/p/'
- });
- },
- function() {
- button.removeClass('active');
- div.hide();
- });
- button.show();
- $('#menu').css('min-width', '+=60');
-}
-
-function setupInlinePlayground() {
- 'use strict';
- // Set up playground when each element is toggled.
- $('div.play').each(function (i, el) {
- // Set up playground for this example.
- var setup = function() {
- var code = $('.code', el);
- playground({
- 'codeEl': code,
- 'outputEl': $('.output', el),
- 'runEl': $('.run', el),
- 'fmtEl': $('.fmt', el),
- 'shareEl': $('.share', el),
- 'shareRedirect': '//play.golang.org/p/'
- });
-
- // Make the code textarea resize to fit content.
- var resize = function() {
- code.height(0);
- var h = code[0].scrollHeight;
- code.height(h+20); // minimize bouncing.
- code.closest('.input').height(h);
- };
- code.on('keydown', resize);
- code.on('keyup', resize);
- code.keyup(); // resize now.
- };
-
- // If example already visible, set up playground now.
- if ($(el).is(':visible')) {
- setup();
- return;
- }
-
- // Otherwise, set up playground when example is expanded.
- var built = false;
- $(el).closest('.toggle').click(function() {
- // Only set up once.
- if (!built) {
- setup();
- built = true;
- }
- });
- });
-}
-
-// fixFocus tries to put focus to div#page so that keyboard navigation works.
-function fixFocus() {
- var page = $('div#page');
- var topbar = $('div#topbar');
- page.css('outline', 0); // disable outline when focused
- page.attr('tabindex', -1); // and set tabindex so that it is focusable
- $(window).resize(function (evt) {
- // only focus page when the topbar is at fixed position (that is, it's in
- // front of page, and keyboard event will go to the former by default.)
- // by focusing page, keyboard event will go to page so that up/down arrow,
- // space, etc. will work as expected.
- if (topbar.css('position') == "fixed")
- page.focus();
- }).resize();
-}
-
-function toggleHash() {
- var hash = $(window.location.hash);
- if (hash.is('.toggle')) {
- hash.find('.toggleButton').first().click();
- }
-}
-
-function personalizeInstallInstructions() {
- var prefix = '?download=';
- var s = window.location.search;
- if (s.indexOf(prefix) != 0) {
- // No 'download' query string; bail.
- return;
- }
-
- var filename = s.substr(prefix.length);
- var filenameRE = /^go1\.\d+(\.\d+)?([a-z0-9]+)?\.([a-z0-9]+)(-[a-z0-9]+)?(-osx10\.[68])?\.([a-z.]+)$/;
- $('.downloadFilename').text(filename);
- $('.hideFromDownload').hide();
- var m = filenameRE.exec(filename);
- if (!m) {
- // Can't interpret file name; bail.
- return;
- }
-
- var os = m[3];
- var ext = m[6];
- if (ext != 'tar.gz') {
- $('#tarballInstructions').hide();
- }
- if (os != 'darwin' || ext != 'pkg') {
- $('#darwinPackageInstructions').hide();
- }
- if (os != 'windows') {
- $('#windowsInstructions').hide();
- $('.testUnix').show();
- $('.testWindows').hide();
- } else {
- if (ext != 'msi') {
- $('#windowsInstallerInstructions').hide();
- }
- if (ext != 'zip') {
- $('#windowsZipInstructions').hide();
- }
- $('.testUnix').hide();
- $('.testWindows').show();
- }
-
- var download = "https://storage.googleapis.com/golang/" + filename;
-
- var message = $('<p>'+
- 'Your download should begin shortly. '+
- 'If it does not, click <a>this link</a>.</p>');
- message.find('a').attr('href', download);
- message.insertAfter('#nav');
-
- window.location = download;
-}
-
-$(document).ready(function() {
- bindSearchEvents();
- generateTOC();
- bindToggles(".toggle");
- bindToggles(".toggleVisible");
- bindToggleLinks(".exampleLink", "example_");
- bindToggleLinks(".overviewLink", "");
- bindToggleLinks(".examplesLink", "");
- bindToggleLinks(".indexLink", "");
- setupDropdownPlayground();
- setupInlinePlayground();
- fixFocus();
- setupTypeInfo();
- setupCallgraphs();
- toggleHash();
- personalizeInstallInstructions();
-
-// godoc.html defines window.initFuncs in the <head> tag, and root.html and
- // codewalk.js push their on-page-ready functions to the list.
- // We execute those functions here, to avoid loading jQuery until the page
- // content is loaded.
- for (var i = 0; i < window.initFuncs.length; i++) window.initFuncs[i]();
-});
-
-// -- analysis ---------------------------------------------------------
-
-// escapeHTML returns HTML for s, with metacharacters quoted.
-// It is safe for use in both elements and attributes
-// (unlike the "set innerText, read innerHTML" trick).
-function escapeHTML(s) {
- return s.replace(/&/g, '&amp;').
- replace(/\"/g, '&quot;').
- replace(/\'/g, '&#39;').
- replace(/</g, '&lt;').
- replace(/>/g, '&gt;');
-}
-
-// makeAnchor returns HTML for an <a> element, given an anchorJSON object.
-function makeAnchor(json) {
- var html = escapeHTML(json.Text);
- if (json.Href != "") {
- html = "" + html + "";
- }
- return html;
-}
-
-function showLowFrame(html) {
- var lowframe = document.getElementById('lowframe');
- lowframe.style.height = "200px";
- lowframe.innerHTML = "" + html + "\n" +
- "✘"
-};
-
-document.hideLowFrame = function() {
- var lowframe = document.getElementById('lowframe');
- lowframe.style.height = "0px";
-}
-
-// onClickCallers is the onclick action for the 'func' tokens of a
-// function declaration.
-document.onClickCallers = function(index) {
- var data = document.ANALYSIS_DATA[index]
- if (data.Callers.length == 1 && data.Callers[0].Sites.length == 1) {
- document.location = data.Callers[0].Sites[0].Href; // jump to sole caller
- return;
- }
-
- var html = "Callers of " + escapeHTML(data.Callee) + ": \n";
- for (var i = 0; i < data.Callers.length; i++) {
- var caller = data.Callers[i];
- html += "" + escapeHTML(caller.Func) + "";
- var sites = caller.Sites;
- if (sites != null && sites.length > 0) {
- html += " at line ";
- for (var j = 0; j < sites.length; j++) {
- if (j > 0) {
- html += ", ";
- }
- html += "" + makeAnchor(sites[j]) + "";
- }
- }
- html += " \n";
- }
- showLowFrame(html);
-};
-
-// onClickCallees is the onclick action for the '(' token of a function call.
-document.onClickCallees = function(index) {
- var data = document.ANALYSIS_DATA[index]
- if (data.Callees.length == 1) {
- document.location = data.Callees[0].Href; // jump to sole callee
- return;
- }
-
- var html = "Callees of this " + escapeHTML(data.Descr) + ": \n";
- for (var i = 0; i < data.Callees.length; i++) {
- html += "" + makeAnchor(data.Callees[i]) + " \n";
- }
- showLowFrame(html);
-};
-
-// onClickTypeInfo is the onclick action for identifiers declaring a named type.
-document.onClickTypeInfo = function(index) {
- var data = document.ANALYSIS_DATA[index];
- var html = "Type " + data.Name + ": " +
- " (size=" + data.Size + ", align=" + data.Align + ") \n";
- html += implementsHTML(data);
- html += methodsetHTML(data);
- showLowFrame(html);
-};
-
-// implementsHTML returns HTML for the implements relation of the
-// specified TypeInfoJSON value.
-function implementsHTML(info) {
- var html = "";
- if (info.ImplGroups != null) {
- for (var i = 0; i < info.ImplGroups.length; i++) {
- var group = info.ImplGroups[i];
- var x = "" + escapeHTML(group.Descr) + " ";
- for (var j = 0; j < group.Facts.length; j++) {
- var fact = group.Facts[j];
- var y = "" + makeAnchor(fact.Other) + "";
- if (fact.ByKind != null) {
- html += escapeHTML(fact.ByKind) + " type " + y + " implements " + x;
- } else {
- html += x + " implements " + y;
- }
- html += " \n";
- }
- }
- }
- return html;
-}
-
-
-// methodsetHTML returns HTML for the methodset of the specified
-// TypeInfoJSON value.
-function methodsetHTML(info) {
- var html = "";
- if (info.Methods != null) {
- for (var i = 0; i < info.Methods.length; i++) {
- html += "" + makeAnchor(info.Methods[i]) + " \n";
- }
- }
- return html;
-}
-
-// onClickComm is the onclick action for channel "make" and "<-"
-// send/receive tokens.
-document.onClickComm = function(index) {
- var ops = document.ANALYSIS_DATA[index].Ops
- if (ops.length == 1) {
- document.location = ops[0].Op.Href; // jump to sole element
- return;
- }
-
- var html = "Operations on this channel: \n";
- for (var i = 0; i < ops.length; i++) {
- html += makeAnchor(ops[i].Op) + " by " + escapeHTML(ops[i].Fn) + " \n";
- }
- if (ops.length == 0) {
- html += "(none) \n";
- }
- showLowFrame(html);
-};
-
-$(window).load(function() {
- // Scroll window so that first selection is visible.
- // (This means we don't need to emit id='L%d' spans for each line.)
- // TODO(adonovan): ideally, scroll it so that it's under the pointer,
- // but I don't know how to get the pointer y coordinate.
- var elts = document.getElementsByClassName("selection");
- if (elts.length > 0) {
- elts[0].scrollIntoView()
- }
-});
-
-// setupTypeInfo populates the "Implements" and "Method set" toggle for
-// each type in the package doc.
-function setupTypeInfo() {
- for (var i in document.ANALYSIS_DATA) {
- var data = document.ANALYSIS_DATA[i];
-
- var el = document.getElementById("implements-" + i);
- if (el != null) {
- // el != null => data is TypeInfoJSON.
- if (data.ImplGroups != null) {
- el.innerHTML = implementsHTML(data);
- el.parentNode.parentNode.style.display = "block";
- }
- }
-
- var el = document.getElementById("methodset-" + i);
- if (el != null) {
- // el != null => data is TypeInfoJSON.
- if (data.Methods != null) {
- el.innerHTML = methodsetHTML(data);
- el.parentNode.parentNode.style.display = "block";
- }
- }
- }
-}
-
-function setupCallgraphs() {
- if (document.CALLGRAPH == null) {
- return
- }
- document.getElementById("pkg-callgraph").style.display = "block";
-
- var treeviews = document.getElementsByClassName("treeview");
- for (var i = 0; i < treeviews.length; i++) {
- var tree = treeviews[i];
- if (tree.id == null || tree.id.indexOf("callgraph-") != 0) {
- continue;
- }
- var id = tree.id.substring("callgraph-".length);
- $(tree).treeview({collapsed: true, animated: "fast"});
- document.cgAddChildren(tree, tree, [id]);
- tree.parentNode.parentNode.style.display = "block";
- }
-}
-
-document.cgAddChildren = function(tree, ul, indices) {
- if (indices != null) {
- for (var i = 0; i < indices.length; i++) {
- var li = cgAddChild(tree, ul, document.CALLGRAPH[indices[i]]);
- if (i == indices.length - 1) {
- $(li).addClass("last");
- }
- }
- }
- $(tree).treeview({animated: "fast", add: ul});
-}
-
-// cgAddChild adds an <li> element for document.CALLGRAPH node cgn to
-// the parent <ul> element ul. tree is the tree's root <ul> element.
-function cgAddChild(tree, ul, cgn) {
- var li = document.createElement("li");
- ul.appendChild(li);
- li.className = "closed";
-
- var code = document.createElement("code");
-
- if (cgn.Callees != null) {
- $(li).addClass("expandable");
-
- // Event handlers and innerHTML updates don't play nicely together,
- // hence all this explicit DOM manipulation.
- var hitarea = document.createElement("div");
- hitarea.className = "hitarea expandable-hitarea";
- li.appendChild(hitarea);
-
- li.appendChild(code);
-
- var childUL = document.createElement("ul");
- li.appendChild(childUL);
- childUL.setAttribute('style', "display: none;");
-
- var onClick = function() {
- document.cgAddChildren(tree, childUL, cgn.Callees);
- hitarea.removeEventListener('click', onClick)
- };
- hitarea.addEventListener('click', onClick);
-
- } else {
- li.appendChild(code);
- }
- code.innerHTML += " " + makeAnchor(cgn.Func);
- return li
-}
-
-})();
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html
deleted file mode 100644
index 5f65b86..0000000
--- a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html
+++ /dev/null
@@ -1,9 +0,0 @@
-
- In the call graph viewer below, each node
- is a function belonging to this package
- and its children are the functions it
- calls—perhaps dynamically.
-
-
- The root nodes are the entry points of the
- package: functions that may be called from
- outside the package.
- There may be non-exported or anonymous
- functions among them if they are called
- dynamically from another package.
-
-
- Click a node to visit that function's source code.
- From there you can visit its callers by
- clicking its declaring func
- token.
-
-
- Functions may be omitted if they were
- determined to be unreachable in the
- particular programs or tests that were
- analyzed.
-
- {{if ne $.IfaceLink ""}}
- The stub package, {{$.PDoc.Name}}iface, can be used to provide alternative implementations of service clients,
- such as mocking the client for testing.
- {{end}}
-
-
-
- {{example_html $ ""}}
-
-
-
-
- Operations ▹
-
-
-
- Operations ▾
-
-
-
-
- {{range .Funcs}}
- {{$name_html := html .Name}}
-
- In the call graph viewer below, each node
- is a function belonging to this package
- and its children are the functions it
- calls—perhaps dynamically.
-
-
- The root nodes are the entry points of the
- package: functions that may be called from
- outside the package.
- There may be non-exported or anonymous
- functions among them if they are called
- dynamically from another package.
-
-
- Click a node to visit that function's source code.
- From there you can visit its callers by
- clicking its declaring func
- token.
-
-
- Functions may be omitted if they were
- determined to be unreachable in the
- particular programs or tests that were
- analyzed.
-
- aws-sdk-go is the official AWS SDK for the Go programming language.
-
- Checkout our release notes for information about the latest bug fixes, updates, and features added to the SDK.
-
-
- Installing
-
- If you are using Go 1.5 with the GO15VENDOREXPERIMENT=1 vendoring flag you can use the following to get the SDK as the SDK's runtime dependencies are vendored in the vendor folder.
-
-
- $ go get -u github.com/aws/aws-sdk-go
-
- Otherwise you'll need to tell Go to get the SDK and all of its dependencies.
-
-
- $ go get -u github.com/aws/aws-sdk-go/...
-
- Configuring Credentials
-
- Before using the SDK, ensure that you've configured credentials. The best way to configure credentials on a development machine is to use the ~/.aws/credentials file, which might look like:
-
- The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (~/aws/credentials). #384 tracks this feature request discussion.
-
-
- Using the Go SDK
-
- To use a service in the SDK, create a service variable by calling the New() function. Once you have a service client, you can call API operations which each return response data and a possible error.
-
- To list a set of instance IDs from EC2, you could run:
-
-
- package main
-
- import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
- )
-
- func main() {
- // Create an EC2 service object in the "us-west-2" region
- // Note that you can also configure your region globally by
- // exporting the AWS_REGION environment variable
- svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")})
-
- // Call the DescribeInstances Operation
- resp, err := svc.DescribeInstances(nil)
- if err != nil {
- panic(err)
- }
-
- // resp has all of the response data, pull out instance IDs:
- fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
- for idx, res := range resp.Reservations {
- fmt.Println(" > Number of instances: ", len(res.Instances))
- for _, inst := range resp.Reservations[idx].Instances {
- fmt.Println(" - Instance ID: ", *inst.InstanceId)
- }
- }
- }
-
-
-
-
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb
deleted file mode 100644
index 9882707..0000000
--- a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb
+++ /dev/null
@@ -1,187 +0,0 @@
-require 'yard'
-require 'yard-go'
-
-module GoLinksHelper
- def signature(obj, link = true, show_extras = true, full_attr_name = true)
- case obj
- when YARDGo::CodeObjects::FuncObject
- if link && obj.has_tag?(:service_operation)
- ret = signature_types(obj, !link)
- args = obj.parameters.map {|m| m[0].split(/\s+/).last }.join(", ")
- line = "#{obj.name}(#{args}) #{ret}"
- return link ? linkify(obj, line) : line
- end
- end
-
- super(obj, link, show_extras, full_attr_name)
- end
-
- def html_syntax_highlight(source, type = nil)
- src = super(source, type || :go)
- object.has_tag?(:service_operation) ? link_types(src) : src
- end
-end
-
-YARD::Templates::Helpers::HtmlHelper.send(:prepend, GoLinksHelper)
-YARD::Templates::Engine.register_template_path(File.dirname(__FILE__) + '/templates')
-
-YARD::Parser::SourceParser.after_parse_list do
- YARD::Registry.all(:struct).each do |obj|
- if obj.file =~ /\/?service\/(.+?)\/(service|api)\.go$/
- obj.add_tag YARD::Tags::Tag.new(:service, $1)
- obj.groups = ["Constructor Functions", "Service Operations", "Request Methods", "Pagination Methods"]
- end
- end
-
- YARD::Registry.all(:method).each do |obj|
- if obj.file =~ /service\/.+?\/api\.go$/ && obj.scope == :instance
- if obj.name.to_s =~ /Pages$/
- obj.group = "Pagination Methods"
- opname = obj.name.to_s.sub(/Pages$/, '')
- obj.docstring = <<-eof
-#{obj.name} iterates over the pages of a {#{opname} #{opname}()} operation, calling the `fn`
-function callback with the response data in each page. To stop iterating, return `false` from
-the function callback.
-
-@note This operation can generate multiple requests to a service.
-@example Iterating over at most 3 pages of a #{opname} operation
- pageNum := 0
- err := client.#{obj.name}(params, func(page *#{obj.parent.parent.name}.#{obj.parameters[1][0].split("*").last}, lastPage bool) bool {
- pageNum++
- fmt.Println(page)
- return pageNum <= 3
- })
-@see #{opname}
-eof
- obj.add_tag YARD::Tags::Tag.new(:paginator, '')
- elsif obj.name.to_s =~ /Request$/
- obj.group = "Request Methods"
- obj.signature = obj.name.to_s
- obj.parameters = []
- opname = obj.name.to_s.sub(/Request$/, '')
- obj.docstring = <<-eof
-#{obj.name} generates a {aws/request.Request} object representing the client request for
-the {#{opname} #{opname}()} operation. The `output` return value can be used to capture
-response data after {aws/request.Request.Send Request.Send()} is called.
-
-Use this method to create a request object when you want to inject custom logic
-into the request lifecycle using a custom handler, or when you want to access
-properties on the request object before or after sending the request. If you
-just want the service response, call the {#{opname} service operation method}
-directly instead.
-
-@note You must call the {aws/request.Request.Send Send()} method on the returned
- request object in order to execute the request.
-@example Sending a request using the #{obj.name}() method
- req, resp := client.#{obj.name}(params)
- err := req.Send()
-
- if err == nil { // resp is now filled
- fmt.Println(resp)
- }
-eof
- obj.add_tag YARD::Tags::Tag.new(:request_method, '')
- else
- obj.group = "Service Operations"
- obj.add_tag YARD::Tags::Tag.new(:service_operation, '')
- if ex = obj.tag(:example)
- ex.name = "Calling the #{obj.name} operation"
- end
- end
- end
- end
-
- apply_docs
-end
-
-def apply_docs
- svc_pkg = YARD::Registry.at('service')
- return if svc_pkg.nil?
-
- pkgs = svc_pkg.children.select {|t| t.type == :package }
- pkgs.each do |pkg|
- svc = pkg.children.find {|t| t.has_tag?(:service) }
- ctor = P(svc, ".New")
- svc_name = ctor.source[/ServiceName:\s*"(.+?)",/, 1]
- api_ver = ctor.source[/APIVersion:\s*"(.+?)",/, 1]
- log.progress "Parsing service documentation for #{svc_name} (#{api_ver})"
- file = Dir.glob("models/apis/#{svc_name}/#{api_ver}/docs-2.json").sort.last
- next if file.nil?
-
- next if svc.nil?
- exmeth = svc.children.find {|s| s.has_tag?(:service_operation) }
- pkg.docstring += <<-eof
-
-@example Sending a request using the {#{svc.name}} client
- client := #{pkg.name}.New(nil)
- params := #{pkg.name}.#{exmeth.parameters.first[0].split("*").last}{...}
- resp, err := client.#{exmeth.name}(params)
-@see #{svc.name}
-@version #{api_ver}
-eof
-
- ctor.docstring += <<-eof
-
-@example Constructing a client using default configuration
- client := #{pkg.name}.New(nil)
-
-@example Constructing a client with custom configuration
- config := aws.NewConfig().WithRegion("us-west-2")
- client := #{pkg.name}.New(config)
-eof
-
- json = JSON.parse(File.read(file))
- if svc
- apply_doc(svc, json["service"])
- end
-
- json["operations"].each do |op, doc|
- if doc && obj = svc.children.find {|t| t.name.to_s.downcase == op.downcase }
- apply_doc(obj, doc)
- end
- end
-
- json["shapes"].each do |shape, data|
- shape = shape_name(shape)
- if obj = pkg.children.find {|t| t.name.to_s.downcase == shape.downcase }
- apply_doc(obj, data["base"])
- end
-
- data["refs"].each do |refname, doc|
- refshape, member = *refname.split("$")
- refshape = shape_name(refshape)
- if refobj = pkg.children.find {|t| t.name.to_s.downcase == refshape.downcase }
- if m = refobj.children.find {|t| t.name.to_s.downcase == member.downcase }
- apply_doc(m, doc || data["base"])
- end
- end
- end if data["refs"]
- end
- end
-end
-
-def apply_doc(obj, doc)
- tags = obj.docstring.tags || []
- obj.docstring = clean_docstring(doc)
- tags.each {|t| obj.docstring.add_tag(t) }
-end
-
-def shape_name(shape)
- shape.sub(/Request$/, "Input").sub(/Response$/, "Output")
-end
-
-def clean_docstring(docs)
- return nil unless docs
- docs = docs.gsub(/<!--.*?-->/m, '')
- docs = docs.gsub(/<fullname?>.+?<\/fullname?>/m, '')
- docs = docs.gsub(/<examples?>.+?<\/examples?>/m, '')
- docs = docs.gsub(/<note>\s*<\/note>/m, '')
- docs = docs.gsub(/<a>(.+?)<\/a>/, '\1')
- docs = docs.gsub(/<note>(.+?)<\/note>/m) do
- text = $1.gsub(/<\/?p>/, '')
- "
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb
deleted file mode 100644
index 9038945..0000000
--- a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-def init
- super
- sections.place(:request_methods, :paginators).after(:method_summary)
-end
-
-def groups(list, type = "Method")
- super(list.reject {|o| o.has_tag?(:paginator) || o.has_tag?(:request_method) }, type)
-end
-
-def paginators
- @items = object.children.select {|o| o.has_tag?(:paginator) }
- return if @items.size == 0
- erb(:paginators)
-end
-
-def request_methods
- @items = object.children.select {|o| o.has_tag?(:request_method) }
- return if @items.size == 0
- erb(:request_methods)
-end
diff --git a/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/README.md b/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/README.md
deleted file mode 100644
index afc7063..0000000
--- a/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Handling Specific Service Error Codes
-
-This example highlights how you can use the `awserr.Error` type to perform logic based on specific error codes returned by service API operations.
-
-In this example the `S3` `GetObject` API operation is used to request the contents of an object in S3. The example handles the `NoSuchBucket` and `NoSuchKey` error codes, printing custom messages to stderr. If any other error is received, a generic message is printed.
-
-## Usage
-
-Makes a request to S3 for the contents of an object. If the request is successful and the object is found, the object's path and size will be printed to stdout.
-
-If the object's bucket or key does not exist, a specific error message will be printed to stderr for the error.
-
-Any other error will be printed as an unknown error.
-
-```sh
-go run -tags example handleServiceErrorCodes.go mybucket mykey
-```
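For quick reference, here is a condensed, hedged sketch of the pattern that README describes: assert the returned error to `awserr.Error` and switch on its `Code()`. The bucket and key names below are placeholders, not values from the removed example; the full removed example file follows.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to create session:", err)
		os.Exit(1)
	}
	svc := s3.New(sess)

	// "my-bucket" and "my-key" are placeholder names.
	resp, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
	})
	if err == nil {
		defer resp.Body.Close()
		fmt.Println("object found, size:", aws.Int64Value(resp.ContentLength))
		return
	}

	// awserr.Error exposes the service error code, which is more reliable
	// to branch on than the error message text.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "NoSuchBucket":
			fmt.Fprintln(os.Stderr, "bucket does not exist")
			return
		case "NoSuchKey":
			fmt.Fprintln(os.Stderr, "object does not exist in the bucket")
			return
		}
	}
	fmt.Fprintln(os.Stderr, "unknown error:", err)
}
```

Branching on `Code()` rather than matching the error string keeps the handling stable even if the service's message wording changes.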
diff --git a/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/handleServiceErrorCodes.go b/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/handleServiceErrorCodes.go
deleted file mode 100644
index 1e36634..0000000
--- a/vendor/github.com/aws/aws-sdk-go/example/aws/request/handleServiceErrorCodes/handleServiceErrorCodes.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// +build example
-
-package main
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-func exitErrorf(msg string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, msg+"\n", args...)
- os.Exit(1)
-}
-
-// Will make a request to S3 for the contents of an object. If the request
-// is successful and the object is found, the object's path and size will be
-// printed to stdout.
-//
-// If the object's bucket or key does not exist, a specific error message will
-// be printed to stderr for the error.
-//
-// Any other error will be printed as an unknown error.
-//
-// Usage: handleServiceErrorCodes <bucket> <key>
-func main() {
- if len(os.Args) < 3 {
- exitErrorf("Usage: %s ", filepath.Base(os.Args[0]))
- }
- sess, err := session.NewSession()
- if err != nil {
- exitErrorf("failed to create session,", err)
- }
-
- svc := s3.New(sess)
- resp, err := svc.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(os.Args[1]),
- Key: aws.String(os.Args[2]),
- })
-
- if err != nil {
- // Casting to the awserr.Error type will allow you to inspect the error
- // code returned by the service in code. The error code can be used
- // to switch on context specific functionality. In this case a context
- // specific error message is printed to the user based on the bucket
- // and key existing.
- //
- // For information on other S3 API error codes see:
- // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
- if aerr, ok := err.(awserr.Error); ok {
- switch aerr.Code() {
- case "NoSuchBucket":
- exitErrorf("bucket %s does not exist", os.Args[1])
- case "NoSuchKey":
- exitErrorf("object with key %s does not exist in bucket %s", os.Args[2], os.Args[1])
- }
- }
- exitErrorf("unknown error occured, %v", err)
- }
- defer resp.Body.Close()
-
- fmt.Printf("s3://%s/%s exists. size: %d\n", os.Args[1], os.Args[2],
- aws.Int64Value(resp.ContentLength))
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md b/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md
deleted file mode 100644
index 567d226..0000000
--- a/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Example
-
-This example shows how the CloudFront CookieSigner can be used to generate signed cookies to provide short-term access to restricted resources fronted by CloudFront.
-
-# Usage
-Makes a request for an object using CloudFront cookie signing and outputs the contents of the object to stdout.
-
-```sh
-go run -tags example signCookies.go -file <private key file> -id <key ID> -r <resource pattern> -g <object to get>
-```
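As a rough sketch of the flow this README describes, using the SDK's `cloudfront/sign` package — the key pair ID, private key path, resource URL, and expiry below are placeholder assumptions, not values taken from the removed example:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Load the CloudFront key pair's private key from a PEM file (placeholder path).
	privKey, err := sign.LoadPEMPrivKeyFile("cf_private_key.pem")
	if err != nil {
		log.Fatalf("failed to load private key: %v", err)
	}

	// Create a cookie signer for the key pair ID tied to the distribution (placeholder ID).
	signer := sign.NewCookieSigner("K2EXAMPLEKEYPAIRID", privKey)

	// Sign cookies that grant access to the resource pattern until the expiry time.
	cookies, err := signer.Sign("https://dxxxxexample.cloudfront.net/restricted/*", time.Now().Add(1*time.Hour))
	if err != nil {
		log.Fatalf("failed to sign cookies: %v", err)
	}

	// Print the signed cookie names; a server would attach these to its response.
	for _, c := range cookies {
		fmt.Println(c.Name)
	}
}
```

In a real service the returned cookies would be written to the viewer's HTTP response (for example with `http.SetCookie`) so that subsequent requests to the CloudFront distribution carry them.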