diff --git a/.bra.toml b/.bra.toml index c401b003dcf7b..ef1a925158d2d 100644 --- a/.bra.toml +++ b/.bra.toml @@ -9,7 +9,7 @@ watch_dirs = [ "$WORKDIR/public/views", "$WORKDIR/conf", ] -watch_exts = [".go", "conf/*"] +watch_exts = [".go", ".ini", ".toml", ".html"] build_delay = 1500 cmds = [ ["go", "build", "-o", "./bin/grafana-server"], diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000..5760be5836966 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 2 +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false diff --git a/.gitignore b/.gitignore index 99ed4c4adfd7e..0ac42cbcb4b04 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,10 @@ coverage/ .aws-config.json awsconfig /dist +/emails/dist +/public_gen /tmp +vendor/phantomjs/phantomjs docs/AWS_S3_BUCKET docs/GIT_BRANCH @@ -27,4 +30,5 @@ public/css/*.min.css conf/custom.ini fig.yml profile.cov - +grafana +.notouch diff --git a/.jshintrc b/.jshintrc index e8f044e1da9de..16904c4241344 100644 --- a/.jshintrc +++ b/.jshintrc @@ -23,7 +23,7 @@ "laxcomma": true, "sub": true, "unused": true, - "maxdepth": 5, + "maxdepth": 6, "maxlen": 140, "globals": { @@ -32,4 +32,4 @@ "Chromath": false, "setImmediate": true } -} \ No newline at end of file +} diff --git a/CHANGELOG.md b/CHANGELOG.md index c046d3364ccd2..81b315d632392 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,112 @@ -# 2.1.0 (unreleased - master branch) +# 2.6.0 (unreleased) + +### New Table Panel +* **table**: New powerful and flexible table panel, closes [#215](https://github.com/grafana/grafana/issues/215) + +### Enhancements +* **CloudWatch**: Support for multiple AWS Credentials, closes [#3053](https://github.com/grafana/grafana/issues/3053), [#3080](https://github.com/grafana/grafana/issues/3080) +* **Elasticsearch**: Support for dynamic daily indices for annotations, closes [#3061](https://github.com/grafana/grafana/issues/3061) +* **Graph Panel**: Option to hide series with all zeroes from legend and tooltip, closes [#1381](https://github.com/grafana/grafana/issues/1381), [#3336](https://github.com/grafana/grafana/issues/3336) + + +### Bug Fixes +* **cloudwatch**: fix for handling of period for long time ranges, fixes [#3086](https://github.com/grafana/grafana/issues/3086) +* **dashboard**: fix for collapse row by clicking on row title, fixes [#3065](https://github.com/grafana/grafana/issues/3065) +* **influxdb**: fix for relative time ranges `last x months` and `last x years`, fixes [#3067](https://github.com/grafana/grafana/issues/3067) +* **graph**: layout fix for color picker when right side legend was enabled, fixes [#3093](https://github.com/grafana/grafana/issues/3093) +* **elasticsearch**: disabling elastic query (via eye) caused error, fixes [#3300](https://github.com/grafana/grafana/issues/3300) + +# 2.5 (2015-10-28) + +**New Feature: Mix data sources** +- A built in data source is now available named `-- Mixed --`, When picked in the metrics tab, +it allows you to add queries of differnet data source types & instances to the same graph/panel! 
+[Issue #436](https://github.com/grafana/grafana/issues/436) + +**New Feature: Elasticsearch Metrics Query Editor and Viz Support** +- Feature-rich query editor and processing features enable you to issue all kinds of metric queries to Elasticsearch +- See [Issue #1034](https://github.com/grafana/grafana/issues/1034) for more info. + +**New Feature: New and much improved time picker** +- Support for quick ranges like `Today`, `This day last week`, `This week`, `The day so far`, etc. +- Improved UI and improved support for UTC, see [Issue #2761](https://github.com/grafana/grafana/issues/2761) for more info. + +**User Onboarding** +- Org admin can now send email invites (or invite links) to people who are not yet Grafana users +- Sign up flow now supports email verification (if enabled) +- See [Issue #2353](https://github.com/grafana/grafana/issues/2353) for more info. + +**Other new Features && Enhancements** +- [Pull #2720](https://github.com/grafana/grafana/pull/2720). Admin: Initial basic quota support (per Org) +- [Issue #2577](https://github.com/grafana/grafana/issues/2577). Panel: Resize handles in panel bottom right corners for easy width and height change +- [Issue #2457](https://github.com/grafana/grafana/issues/2457). Admin: admin page for all grafana organizations (list / edit view) +- [Issue #1186](https://github.com/grafana/grafana/issues/1186). Time Picker: New option `today`, will set time range from midnight to now +- [Issue #2647](https://github.com/grafana/grafana/issues/2647). InfluxDB: You can now set group by time interval on each query +- [Issue #2599](https://github.com/grafana/grafana/issues/2599). InfluxDB: Improved alias support, you can now use the `AS` clause for each select statement +- [Issue #2708](https://github.com/grafana/grafana/issues/2708). InfluxDB: You can now set math expressions for select clauses. +- [Issue #1575](https://github.com/grafana/grafana/issues/1575). Drilldown link: now you can click on the external link icon in the panel header to access drilldown links! +- [Issue #1646](https://github.com/grafana/grafana/issues/1646). OpenTSDB: Fetch list of aggregators from OpenTSDB +- [Issue #2955](https://github.com/grafana/grafana/issues/2955). Graph: More axis units (Length, Volume, Temperature, Pressure, etc), thanks @greglook +- [Issue #2928](https://github.com/grafana/grafana/issues/2928). LDAP: Support for searching for group memberships, i.e. POSIX (no memberOf) schemas, also multiple LDAP servers and root CA cert, thanks @abligh + +**Fixes** +- [Issue #2413](https://github.com/grafana/grafana/issues/2413). InfluxDB 0.9: Fix for handling empty series object in response from influxdb +- [Issue #2574](https://github.com/grafana/grafana/issues/2574). Snapshot: Fix for snapshot expire 7 days option, which was incorrectly set to 7 hours +- [Issue #2568](https://github.com/grafana/grafana/issues/2568). AuthProxy: Fix for server side rendering of panel when using auth proxy +- [Issue #2490](https://github.com/grafana/grafana/issues/2490). Graphite: Dashboard import was broken in 2.1 and 2.1.1, working now +- [Issue #2565](https://github.com/grafana/grafana/issues/2565). TimePicker: Fix for when you applied a custom time range it did not refresh the dashboard +- [Issue #2563](https://github.com/grafana/grafana/issues/2563).
Annotations: Fixed issue when html sanitizer fails for title or annotation body, now falls back to html escaping title and text +- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url) +- [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did not highlight the correct point when stacking was enabled and series were of different resolution +- [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do not show template vars in dropdown for tag keys and group by keys +- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots) + +**Breaking Changes** +- Notice to makers/users of custom data sources, there is a minor breaking change in 2.2 that +requires an update to custom data sources for them to work in 2.2. [Read this doc](https://github.com/grafana/grafana/tree/master/docs/sources/datasources/plugin_api.md) for more on the +data source api change. +- Data source api changes, [PLUGIN_CHANGES.md](https://github.com/grafana/grafana/blob/master/public/app/plugins/PLUGIN_CHANGES.md) +- The duplicate query function used in data source editors is changed, and the moveMetricQuery function was renamed + +**Tech (Note for devs)** +Started using Typescript (transpiled to ES5); uncompiled typescript files and less files are in the public folder (in source tree). +This folder is never modified by build steps. Compiled css and javascript files are put in public_gen, all other files +that do not undergo transformation are just copied from public to public_gen; it is public_gen that is used by grafana-server +if it is found. + +Grunt & Watch tasks: +- `grunt` : default task, will remove public_gen, copy over all files from public, do less & typescript compilation +- `grunt watch`: will watch for changes to less, and typescript files and compile them to public_gen, and for other files it will just copy them to public_gen + + +# 2.1.3 (2015-08-24) + +**Fixes** +- [Issue #2580](https://github.com/grafana/grafana/issues/2580). Packaging: ldap.toml was not marked as a config file and could be overwritten on upgrade +- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url) + +# 2.1.2 (2015-08-20) + +**Fixes** +- [Issue #2558](https://github.com/grafana/grafana/issues/2558). DragDrop: Fix for broken drag drop behavior +- [Issue #2534](https://github.com/grafana/grafana/issues/2534). Templating: fix for setting template variable value via url and having repeated panels or rows + +# 2.1.1 (2015-08-11) + +**Fixes** +- [Issue #2443](https://github.com/grafana/grafana/issues/2443). Templating: Fix for buggy repeat row behavior when combined with repeat panel due to recent change before 2.1 release +- [Issue #2442](https://github.com/grafana/grafana/issues/2442). Templating: Fix text panel when using template variables in text in repeated panel +- [Issue #2446](https://github.com/grafana/grafana/issues/2446). InfluxDB: Fix for using template vars inside alias field (InfluxDB 0.9) +- [Issue #2460](https://github.com/grafana/grafana/issues/2460).
SinglestatPanel: Fix to handle series with no data points +- [Issue #2461](https://github.com/grafana/grafana/issues/2461). LDAP: Fix for ldap users with empty email address +- [Issue #2484](https://github.com/grafana/grafana/issues/2484). Graphite: Fix bug when using series ref (#A-Z) and referenced series is hidden in query editor. +- [Issue #1896](https://github.com/grafana/grafana/issues/1896). Postgres: Dashboard search is now case insensitive when using Postgres + +**Enhancements** +- [Issue #2477](https://github.com/grafana/grafana/issues/2477). InfluxDB(0.9): Added more condition operators (`<`, `>`, `<>`, `!~`), thx @thuck +- [Issue #2483](https://github.com/grafana/grafana/issues/2484). InfluxDB(0.9): Use $col as option in alias patterns, thx @thuck + +# 2.1.0 (2015-08-04) **Data sources** - [Issue #1525](https://github.com/grafana/grafana/issues/1525). InfluxDB: Full support for InfluxDB 0.9 with new adapted query editor @@ -103,6 +211,10 @@ # 2.0.0-Beta1 (2015-03-30) +**Important Note** + +Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more details about this SIGNIFICANT change to Grafana + +**New features** - [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to the public snapshot site snapshots.raintank.io - [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embed a single graph on another web site diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index a1808fa245ca4..ee9d78e14707b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/grafana/grafana", - "GoVersion": "go1.4.2", + "GoVersion": "go1.5", "Packages": [ "./pkg/..."
], @@ -18,10 +18,65 @@ "ImportPath": "github.com/Unknwon/macaron", "Rev": "93de4f3fad97bf246b838f828e2348f46f21f20a" }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", + "Comment": "v0.10.4-18-gce51895", + "Rev": "ce51895e994693d65ab997ae48032bf13a9290b7" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "2df174808ee097f90d259e432cc04442cf60be21" }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v0-48-g060d7da", + "Rev": "060d7da055ba6ec5ea7a31f116332fe5efa04ce0" + }, { "ImportPath": "github.com/go-ldap/ldap", "Comment": "v1-19-g83e6542", @@ -45,6 +100,11 @@ "ImportPath": "github.com/gosimple/slug", "Rev": "8d258463b4459f161f51d6a357edacd3eef9d663" }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2", + "Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9" + }, { "ImportPath": "github.com/jtolds/gls", "Rev": "f1ac7f4f24f50328e6bc838ca4437d1612a0243c" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 0000000000000..a52743bef1cd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,105 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", err.Code(), err.Message()) +// +// // Prints out full error message, including original error if there was one. 
+// log.Println("Error:", err.Error()) +// +// // Get original error +// if origErr := err.Err(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + if e, ok := origErr.(Error); ok && e != nil { + return e + } + return newBaseError(code, message, origErr) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Printf("Error:", err.Error() +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000000000..003a6e8067e4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,135 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. 
+// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + origErr error +} + +// newBaseError returns an error object for the code, message, and err. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func newBaseError(code, message string, origErr error) *baseError { + return &baseError{ + code: code, + message: message, + origErr: origErr, + } +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + return SprintError(b.code, b.message, "", b.origErr) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. +func (b baseError) OrigErr() error { + return b.origErr +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for request +// status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
+func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000000000..8429470b9d723 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go new file mode 100644 index 0000000000000..84b7e3f34aa0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go @@ -0,0 +1,233 @@ +package awsutil_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func ExampleCopy() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} + +func TestCopy(t *testing.T) { + type Foo struct { + A int + B []*string + C map[string]*int + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &Foo{ + A: 1, + B: []*string{&str1, &str2}, + C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + } + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + + // But pointers are not! + str3 := "nothello" + int3 := 57 + f2.A = 100 + f2.B[0] = &str3 + f2.C["B"] = &int3 + assert.NotEqual(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.C, f1.C) +} + +func TestCopyNestedWithUnexported(t *testing.T) { + type Bar struct { + a int + B int + } + type Foo struct { + A string + B Bar + } + + f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}} + + var f2 Foo + awsutil.Copy(&f2, f1) + + // Values match + assert.Equal(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.B.a, f1.B.a) + assert.Equal(t, f2.B.B, f2.B.B) +} + +func TestCopyIgnoreNilMembers(t *testing.T) { + type Foo struct { + A *string + B []string + C map[string]string + } + + f := &Foo{} + assert.Nil(t, f.A) + assert.Nil(t, f.B) + assert.Nil(t, f.C) + + var f2 Foo + awsutil.Copy(&f2, f) + assert.Nil(t, f2.A) + assert.Nil(t, f2.B) + assert.Nil(t, f2.C) + + fcopy := awsutil.CopyOf(f) + f3 := fcopy.(*Foo) + assert.Nil(t, f3.A) + assert.Nil(t, f3.B) + assert.Nil(t, f3.C) +} + +func TestCopyPrimitive(t *testing.T) { + str := "hello" + var s string + awsutil.Copy(&s, &str) + assert.Equal(t, "hello", s) +} + +func TestCopyNil(t *testing.T) { + var s string + awsutil.Copy(&s, nil) + assert.Equal(t, "", s) +} + +func TestCopyReader(t *testing.T) { + var buf io.Reader = bytes.NewReader([]byte("hello world")) + var r io.Reader + awsutil.Copy(&r, buf) + b, err := ioutil.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, []byte("hello world"), b) + + // empty bytes because this is not a deep copy + b, err = ioutil.ReadAll(buf) + assert.NoError(t, err) + assert.Equal(t, []byte(""), b) +} + +func TestCopyDifferentStructs(t *testing.T) { + type SrcFoo struct { + A int + B []*string + C map[string]*int + SrcUnique string + SameNameDiffType int + unexportedPtr *int + ExportedPtr *int + } + type DstFoo struct { + A int + B []*string + C map[string]*int + DstUnique int + SameNameDiffType string + unexportedPtr *int + ExportedPtr *int + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &SrcFoo{ + A: 1, + B: []*string{&str1, &str2}, 
+ C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + SrcUnique: "unique", + SameNameDiffType: 1, + unexportedPtr: &int1, + ExportedPtr: &int2, + } + + // Do the copy + var f2 DstFoo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + assert.Equal(t, "unique", f1.SrcUnique) + assert.Equal(t, 1, f1.SameNameDiffType) + assert.Equal(t, 0, f2.DstUnique) + assert.Equal(t, "", f2.SameNameDiffType) + assert.Equal(t, int1, *f1.unexportedPtr) + assert.Nil(t, f2.unexportedPtr) + assert.Equal(t, int2, *f1.ExportedPtr) + assert.Equal(t, int2, *f2.ExportedPtr) +} + +func ExampleCopyOf() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + v := awsutil.CopyOf(f1) + var f2 *Foo = v.(*Foo) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 0000000000000..59fa4a558a9ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go new file mode 100644 index 0000000000000..7a5db6e49bc2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go @@ -0,0 +1,29 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func TestDeepEqual(t *testing.T) { + cases := []struct { + a, b interface{} + equal bool + }{ + {"a", "a", true}, + {"a", "b", false}, + {"a", aws.String(""), false}, + {"a", nil, false}, + {"a", aws.String("a"), true}, + {(*bool)(nil), (*bool)(nil), true}, + {(*bool)(nil), (*string)(nil), false}, + {nil, nil, true}, + } + + for i, c := range cases { + assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000000000..8c0dc2d8d24cb --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,210 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, create, caseSensitive) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if create && value.Kind() == reflect.Ptr && value.IsNil() { + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !create && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if create { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !create && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false); rvals != nil { + for _, rval := range rvals { + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go new file mode 100644 index 0000000000000..378a486484885 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go @@ -0,0 +1,108 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +type Struct struct { + A []Struct + z []Struct + B *Struct + D *Struct + C string + E map[string]string +} + +var data = Struct{ + A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, + C: "initial", +} +var data2 = Struct{A: []Struct{ + {A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + {A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, +}} + +func TestValueAtPathSuccess(t *testing.T) { + var testCases = []struct { + expect []interface{} + data interface{} + path string + }{ + {[]interface{}{"initial"}, data, "C"}, + {[]interface{}{"value1"}, data, "A[0].C"}, + {[]interface{}{"value2"}, data, "A[1].C"}, + {[]interface{}{"value3"}, data, "A[2].C"}, + {[]interface{}{"value3"}, data, "a[2].c"}, + {[]interface{}{"value3"}, data, "A[-1].C"}, + {[]interface{}{"value1", "value2", "value3"}, data, "A[].C"}, + {[]interface{}{"terminal"}, data, "B . B . 
C"}, + {[]interface{}{"initial"}, data, "A.D.X || C"}, + {[]interface{}{"initial"}, data, "A[0].B || C"}, + {[]interface{}{ + Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, + }, data2, "A"}, + } + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestValueAtPathFailure(t *testing.T) { + var testCases = []struct { + expect []interface{} + errContains string + data interface{} + path string + }{ + {nil, "", data, "C.x"}, + {nil, "SyntaxError: Invalid token: tDot", data, ".x"}, + {nil, "", data, "X.Y.Z"}, + {nil, "", data, "A[100].C"}, + {nil, "", data, "A[3].C"}, + {nil, "", data, "B.B.C.Z"}, + {nil, "", data, "z[-1].C"}, + {nil, "", nil, "A.B.C"}, + {[]interface{}{}, "", Struct{}, "A"}, + {nil, "", data, "A[0].B.C"}, + {nil, "", data, "D"}, + } + + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + if c.errContains != "" { + assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path) + continue + } else { + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + } + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestSetValueAtPathSuccess(t *testing.T) { + var s Struct + awsutil.SetValueAtPath(&s, "C", "test1") + awsutil.SetValueAtPath(&s, "B.B.C", "test2") + awsutil.SetValueAtPath(&s, "B.D.C", "test3") + assert.Equal(t, "test1", s.C) + assert.Equal(t, "test2", s.B.B.C) + assert.Equal(t, "test3", s.B.D.C) + + awsutil.SetValueAtPath(&s, "B.*.C", "test0") + assert.Equal(t, "test0", s.B.B.C) + assert.Equal(t, "test0", s.B.D.C) + + var s2 Struct + awsutil.SetValueAtPath(&s2, "b.b.c", "test0") + assert.Equal(t, "test0", s2.B.B.C) + awsutil.SetValueAtPath(&s2, "A", []Struct{{}}) + assert.Equal(t, []Struct{{}}, s2.A) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000000000..0de3eaa0f639c --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000000000..b6432f1a1188d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000000000..63ab805b9c4ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,111 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no reponse data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 0000000000000..24d39ce5641ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,45 @@ +package client + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. 
+func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 0000000000000..4778056ddfdae --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 0000000000000..f157f8b3b5a6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,248 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is nil also. +const UseServiceDefaultRetries = -1 + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. 
+ // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. 
+func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go new file mode 100644 index 0000000000000..fe97a31fc7656 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go @@ -0,0 +1,86 @@ +package aws + +import ( + "net/http" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + +var copyTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("CopyTestEndpoint"), + Region: String("COPY_TEST_AWS_REGION"), + DisableSSL: Bool(true), + HTTPClient: http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(3), + DisableParamValidation: Bool(true), + DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +func TestCopy(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if !reflect.DeepEqual(*got, want) { + t.Errorf("Copy() = %+v", got) + t.Errorf(" want %+v", want) + } + + got.Region = String("other") + if got.Region == want.Region { + t.Errorf("Expect setting copy values not not reflect in source") + } +} + +func TestCopyReturnsNewInstance(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if got == &want { + t.Errorf("Copy() = %p; want different instance as source %p", got, &want) + } +} + +var mergeTestZeroValueConfig = Config{} + +var mergeTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("MergeTestEndpoint"), + Region: String("MERGE_TEST_AWS_REGION"), + DisableSSL: Bool(true), + HTTPClient: 
http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(10), + DisableParamValidation: Bool(true), + DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +var mergeTests = []struct { + cfg *Config + in *Config + want *Config +}{ + {&Config{}, nil, &Config{}}, + {&Config{}, &mergeTestZeroValueConfig, &Config{}}, + {&Config{}, &mergeTestConfig, &mergeTestConfig}, +} + +func TestMerge(t *testing.T) { + for i, tt := range mergeTests { + got := tt.cfg.Copy() + got.MergeIn(tt.in) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Config %d %+v", i, tt.cfg) + t.Errorf(" Merge(%+v)", tt.in) + t.Errorf(" got %+v", got) + t.Errorf(" want %+v", tt.want) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000000000..d6a7b08dffe49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go new file mode 100644 index 0000000000000..df7a3e5d2de51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go @@ -0,0 +1,437 @@ +package aws + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", 
idx) + } +} + +var testCasesBoolSlice = [][]bool{ + {true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap { + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + 
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64Slice = [][]int64{ + {1, 2, 3, 4}, +} + +func TestInt64Slice(t *testing.T) { + for idx, in := range testCasesInt64Slice { + if in == nil { + continue + } + out := Int64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64ValueSlice = [][]*int64{} + +func TestInt64ValueSlice(t *testing.T) { + for idx, in := range testCasesInt64ValueSlice { + if in == nil { + continue + } + out := Int64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt64Map = []map[string]int64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt64Map(t *testing.T) { + for idx, in := range testCasesInt64Map { + if in == nil { + continue + } + out := Int64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64Slice = [][]float64{ + {1, 2, 3, 4}, +} + +func TestFloat64Slice(t *testing.T) { + for idx, in := range testCasesFloat64Slice { + if in == nil { + continue + } + out := Float64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64ValueSlice = [][]*float64{} + +func TestFloat64ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat64ValueSlice { + if in == nil { + continue + } + out := Float64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat64Map = []map[string]float64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat64Map(t *testing.T) { + for idx, in := range testCasesFloat64Map { + if in == nil { + continue + } + out := Float64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected 
value at idx %d", idx) + } + + out2 := Float64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeSlice = [][]time.Time{ + {time.Now(), time.Now().AddDate(100, 0, 0)}, +} + +func TestTimeSlice(t *testing.T) { + for idx, in := range testCasesTimeSlice { + if in == nil { + continue + } + out := TimeSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeValueSlice = [][]*time.Time{} + +func TestTimeValueSlice(t *testing.T) { + for idx, in := range testCasesTimeValueSlice { + if in == nil { + continue + } + out := TimeValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := TimeSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesTimeMap = []map[string]time.Time{ + {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, +} + +func TestTimeMap(t *testing.T) { + for idx, in := range testCasesTimeMap { + if in == nil { + continue + } + out := TimeMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 0000000000000..1d3e656fd68e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,139 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. 
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go new file mode 100644 index 0000000000000..632ea8d4ea1fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go @@ -0,0 +1,113 @@ +package corehandlers_test + +import ( + "fmt" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestValidateEndpointHandler(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2")) + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.NoError(t, err) +} + +func TestValidateEndpointHandlerErrorRegion(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient() + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingRegion, err) +} + +type mockCredsProvider struct { + expired bool + retrieveCalled bool +} + +func (m *mockCredsProvider) Retrieve() (credentials.Value, error) { + m.retrieveCalled = true + return credentials.Value{}, nil +} + +func (m *mockCredsProvider) IsExpired() bool { + return m.expired +} + +func TestAfterRetryRefreshCreds(t *testing.T) { + os.Clearenv() + credProvider := &mockCredsProvider{} + + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewCredentials(credProvider), + MaxRetries: aws.Int(1), + }) + + svc.Handlers.Clear() + svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) { + r.Error = awserr.New("UnknownError", "", nil) + r.HTTPResponse = &http.Response{StatusCode: 400} + }) + svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + r.Error = awserr.New("ExpiredTokenException", "", nil) + }) + 
svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + + assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") + assert.False(t, credProvider.retrieveCalled) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + req.Send() + + assert.True(t, svc.Config.Credentials.IsExpired()) + assert.False(t, credProvider.retrieveCalled) + + _, err := svc.Config.Credentials.Get() + assert.NoError(t, err) + assert.True(t, credProvider.retrieveCalled) +} + +type testSendHandlerTransport struct{} + +func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("mock error") +} + +func TestSendHandlerError(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + HTTPClient: &http.Client{ + Transport: &testSendHandlerTransport{}, + }, + }) + svc.Handlers.Clear() + svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler) + r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + + r.Send() + + assert.Error(t, r.Error) + assert.NotNil(t, r.HTTPResponse) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000000000..3b53f5e026a9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." 
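+ // At the root of the input (empty path) drop the "." separator so
+ // reported field paths read as "Field" rather than ".Field".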
+ if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. 
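+ // For now only string, slice, and map lengths are validated against the
+ // `min` tag; numeric kinds fall through this switch unchecked.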
+ + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go new file mode 100644 index 0000000000000..96bfc0e672034 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go @@ -0,0 +1,134 @@ +package corehandlers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/stretchr/testify/require" +) + +var testSvc = func() *client.Client { + s := &client.Client{ + Config: aws.Config{}, + ClientInfo: metadata.ClientInfo{ + ServiceName: "mock-service", + APIVersion: "2015-01-01", + }, + } + return s +}() + +type StructShape struct { + RequiredList []*ConditionalStructShape `required:"true"` + RequiredMap map[string]*ConditionalStructShape `required:"true"` + RequiredBool *bool `required:"true"` + OptionalStruct *ConditionalStructShape + + hiddenParameter *string + + metadataStructureShape +} + +type metadataStructureShape struct { + SDKShapeTraits bool +} + +type ConditionalStructShape struct { + Name *string `required:"true"` + SDKShapeTraits bool +} + +func TestNoErrors(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {Name: aws.String("Name")}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + require.NoError(t, req.Error) +} + +func TestMissingRequiredParameters(t *testing.T) { + input := &StructShape{} + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message()) +} + +func TestNestedMissingRequiredParameters(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{{}}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message()) +} + +type testInput struct { + StringField string `min:"5"` + PtrStrField *string `min:"2"` + ListField []string `min:"3"` + MapField map[string]string `min:"4"` +} + +var testsFieldMin = []struct { + err awserr.Error + in testInput +}{ + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, 
minimum length 5: StringField", nil), + in: testInput{StringField: "abcd"}, + }, + { + err: awserr.New("InvalidParameter", "2 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", "b"}}, + }, + { + err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}}, + }, + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil), + in: testInput{StringField: "abcde", PtrStrField: aws.String("v")}, + }, + { + err: nil, + in: testInput{StringField: "abcde", PtrStrField: aws.String("value"), + ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}}, + }, +} + +func TestValidateFieldMinParameter(t *testing.T) { + for i, c := range testsFieldMin { + req := testSvc.NewRequest(&request.Operation{}, &c.in, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Equal(t, c.err, req.Error, "%d case failed", i) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000000000..7f509ca83ca51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,85 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{}, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. 
+func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + for _, p := range c.Providers { + if creds, err := p.Retrieve(); err == nil { + c.curr = p + return creds, nil + } + } + c.curr = nil + + // TODO better error reporting. maybe report error for each failed retrieve? + + return Value{}, ErrNoValidProvidersFoundInChain +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go new file mode 100644 index 0000000000000..4fba22f29f44a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go @@ -0,0 +1,73 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +func TestChainProviderGet(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, + &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + &stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + }, + }, + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestChainProviderIsExpired(t *testing.T) { + stubProvider := &stubProvider{expired: true} + p := &ChainProvider{ + Providers: []Provider{ + stubProvider, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve") + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") + + stubProvider.expired = true + assert.True(t, p.IsExpired(), "Expect return of expired provider") + + _, err = p.Retrieve() + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") +} + +func TestChainProviderWithNoProvider(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{}, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProvider(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, + &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned") +} diff --git 
a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 0000000000000..5dd71f02e80d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,220 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. 
+ Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. 
+// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go new file mode 100644 index 0000000000000..99c2b47742e03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go @@ -0,0 +1,62 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +type stubProvider struct { + creds Value + expired bool + err error +} + +func (s *stubProvider) Retrieve() (Value, error) { + s.expired = false + return s.creds, s.err +} +func (s *stubProvider) IsExpired() bool { + return s.expired +} + +func TestCredentialsGet(t *testing.T) { + c := NewCredentials(&stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + expired: true, + }) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestCredentialsGetWithError(t *testing.T) { + c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true}) + + _, err := c.Get() + assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error") +} + +func TestCredentialsExpire(t *testing.T) { + stub := &stubProvider{} + c := NewCredentials(stub) + + stub.expired = false + assert.True(t, c.IsExpired(), "Expected to start out expired") + c.Expire() + assert.True(t, c.IsExpired(), "Expected to be expired") + + c.forceRefresh = false + assert.False(t, c.IsExpired(), "Expected not to be expired") + + stub.expired = true + assert.True(t, c.IsExpired(), "Expected to be expired") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 0000000000000..80702c26f9c52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,173 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: &http.Client{ +// Timeout: 10 * time.Second, +// }, +// // Do not use early expiry of credentials. 
If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{}, err + } + + if len(credsList) == 0 { + return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go new file mode 100644 index 0000000000000..da3d8ed3ec172 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go @@ -0,0 +1,159 @@ +package ec2rolecreds_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" +) + +const credsRespTmpl = `{ + "Code": "Success", + "Type": "AWS-HMAC", + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : "token", + "Expiration" : "%s", + "LastUpdated" : "2009-11-23T0:00:00Z" +}` + +const credsFailRespTmpl = `{ + "Code": "ErrorCode", + "Message": "ErrorMsg", + "LastUpdated": "2009-11-23T0:00:00Z" +}` + +func initTestServer(expireOn string, failAssume bool) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/latest/meta-data/iam/security-credentials" { + fmt.Fprintln(w, "RoleName") + } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" { + if failAssume { + fmt.Fprintf(w, credsFailRespTmpl) + } else { + fmt.Fprintf(w, credsRespTmpl, expireOn) + } + } else { + http.Error(w, "bad request", http.StatusBadRequest) + } + })) + + return server +} + +func TestEC2RoleProvider(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: 
ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderFailAssume(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", true) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + + creds, err := p.Retrieve() + assert.Error(t, err, "Expect error") + + e := err.(awserr.Error) + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "ErrorMsg", e.Message()) + assert.Nil(t, e.OrigErr()) + + assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + ExpiryWindow: time.Hour * 1, + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func BenchmarkEC3RoleProvider(b *testing.B) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 0000000000000..043e861d6f2c7 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,73 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
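A minimal usage sketch of the EnvProvider above, reached through the exported NewEnvCredentials helper. It assumes only the vendored credentials API shown here; the key values come from the caller's environment, not from anything in this change:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (or the legacy
        // AWS_ACCESS_KEY / AWS_SECRET_KEY names) from the process environment.
        creds := credentials.NewEnvCredentials()

        v, err := creds.Get()
        if err != nil {
            // ErrAccessKeyIDNotFound / ErrSecretAccessKeyNotFound when unset.
            log.Fatal(err)
        }
        fmt.Println("loaded access key:", v.AccessKeyID)
    }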
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go new file mode 100644 index 0000000000000..53f6ce256eb0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go @@ -0,0 +1,70 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestEnvProviderRetrieve(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEnvProviderIsExpired(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + + assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.") +} + +func TestEnvProviderNoAccessKeyID(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderNoSecretAccessKey(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderAlternateNames(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SECRET_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key") + assert.Empty(t, creds.SessionToken, "Expected no token") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 0000000000000..7fc91d9d2047b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 0000000000000..09bd00a95064e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,147 @@ 
+package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. 
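A hedged sketch of using the SharedCredentialsProvider above through NewSharedCredentials. With both arguments empty it falls back to AWS_SHARED_CREDENTIALS_FILE (or $HOME/.aws/credentials) and to AWS_PROFILE (or "default"), per the filename and profile helpers that follow:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Empty filename and profile: resolve both from the environment,
        // defaulting to $HOME/.aws/credentials and the [default] profile.
        creds := credentials.NewSharedCredentials("", "")

        v, err := creds.Get()
        if err != nil {
            log.Fatal(err) // e.g. SharedCredsLoad when the file or profile is missing
        }
        fmt.Println("profile access key:", v.AccessKeyID)
    }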
+func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go new file mode 100644 index 0000000000000..2e26da2634b1e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestSharedCredentialsProvider(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderIsExpired(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini") + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "no_token") + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + 
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderColonInCredFile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func BenchmarkSharedCredentialsProvider(b *testing.B) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000000000..530a9ac2f3634 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,44 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set pragmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{}, ErrStaticCredentialsEmpty + } + + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. 
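StaticProvider is normally reached through NewStaticCredentials; a minimal sketch, with placeholder key values rather than real credentials:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Fixed keys; they never expire, and Retrieve fails only with
        // ErrStaticCredentialsEmpty when the key or secret is empty.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "") // session token is optional
        v, err := creds.Get()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v.AccessKeyID)
    }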
+func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go new file mode 100644 index 0000000000000..ea01236962447 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go @@ -0,0 +1,34 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestStaticProviderGet(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + creds, err := s.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no session token") +} + +func TestStaticProviderIsExpired(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + assert.False(t, s.IsExpired(), "Expect static credentials to never expire") +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 0000000000000..0214860d4ccd2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,130 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. 
+ // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfiede by the STS client. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + + roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + }) + + if err != nil { + return credentials.Value{}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go new file mode 100644 index 0000000000000..6bd6e91973ebf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go @@ -0,0 +1,56 @@ +package stscreds + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/stretchr/testify/assert" +) + +type stubSTS struct { +} + +func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { + expiry := time.Now().Add(60 * time.Minute) + return &sts.AssumeRoleOutput{ + Credentials: &sts.Credentials{ + // Just reflect the role arn to the provider. 
+ AccessKeyId: input.RoleArn, + SecretAccessKey: aws.String("assumedSecretAccessKey"), + SessionToken: aws.String("assumedSessionToken"), + Expiration: &expiry, + }, + }, nil +} + +func TestAssumeRoleProvider(t *testing.T) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN") + assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match") +} + +func BenchmarkAssumeRoleProvider(b *testing.B) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 0000000000000..b746783becabb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,76 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. 
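CredChain below assembles the default lookup order (environment variables, then the shared credentials file, then EC2 instance role credentials). A hedged sketch of picking that chain up via defaults.Get():

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        // Get wires the default Config and Handlers and installs the chain
        // built by CredChain as Config.Credentials.
        d := defaults.Get()

        v, err := d.Config.Credentials.Get()
        if err != nil {
            log.Fatal(err) // no provider in the chain could supply credentials
        }
        fmt.Println("resolved access key:", v.AccessKeyID)
    }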
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 0000000000000..e5137ca17da91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,43 @@ +package ec2metadata + +import ( + "path" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
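A short sketch of the EC2 metadata client above (Available and Region), constructed the same way the tests do with the vendored session package; it only returns useful data when run on an EC2 instance:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        svc := ec2metadata.New(session.New())

        if !svc.Available() {
            log.Fatal("metadata service unreachable (not running on EC2?)")
        }

        // Region strips the availability-zone suffix, e.g. "us-west-2a" -> "us-west-2".
        region, err := svc.Region()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("region:", region)
    }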
+func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go new file mode 100644 index 0000000000000..c3c92972b7cb4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go @@ -0,0 +1,101 @@ +package ec2metadata_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "path" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" +) + +func initTestServer(path string, resp string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI != path { + http.Error(w, "not found", http.StatusNotFound) + return + } + + w.Write([]byte(resp)) + })) +} + +func TestEndpoint(t *testing.T) { + c := ec2metadata.New(session.New()) + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", "testpath"), + } + + req := c.NewRequest(op, nil, nil) + assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint) + assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String()) +} + +func TestGetMetadata(t *testing.T) { + server := initTestServer( + "/latest/meta-data/some/path", + "success", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + resp, err := c.GetMetadata("some/path") + + assert.NoError(t, err) + assert.Equal(t, "success", resp) +} + +func TestGetRegion(t *testing.T) { + server := initTestServer( + "/latest/meta-data/placement/availability-zone", + "us-west-2a", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + region, err := c.Region() + + assert.NoError(t, err) + assert.Equal(t, "us-west-2", region) +} + +func TestMetadataAvailable(t *testing.T) { + server := initTestServer( + "/latest/meta-data/instance-id", + "instance-id", + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + available := c.Available() + + assert.True(t, available) +} + +func TestMetadataNotAvailable(t *testing.T) { + c := ec2metadata.New(session.New()) + c.Handlers.Send.Clear() + c.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + r.Error = awserr.New("RequestError", "send request failed", nil) + r.Retryable = aws.Bool(true) // network errors are retryable + }) + + available := c.Available() + + assert.False(t, available) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 0000000000000..f0dc331e012eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,116 @@ +// Package ec2metadata provides the client for making API calls to 
the +// EC2 Metadata service. +package ec2metadata + +import ( + "io/ioutil" + "net" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + // If the default http client is provided, replace it with a custom + // client using default timeouts. + if cfg.HTTPClient == http.DefaultClient { + cfg.HTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + }, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + } + + data := r.Data.(*metadataOutput) + data.Content = string(b) +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + _, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + } + + // TODO extract the error... 
+} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000000000..57663616868fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000000000..f5369487384e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,98 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. 
This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000000000..3e90a7976aeff --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,140 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + var n HandlerList + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) 
+} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for _, f := range l.list { + f.Fn(r) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go new file mode 100644 index 0000000000000..16a1418283b2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go @@ -0,0 +1,47 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func TestHandlerList(t *testing.T) { + s := "" + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { + s += "a" + r.Data = s + }) + l.Run(r) + assert.Equal(t, "a", s) + assert.Equal(t, "a", r.Data) +} + +func TestMultipleHandlers(t *testing.T) { + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { r.Data = nil }) + l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) }) + l.Run(r) + if r.Data != nil { + t.Error("Expected handler to execute") + } +} + +func TestNamedHandlers(t *testing.T) { + l := request.HandlerList{} + named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}} + named2 := request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}} + l.PushBackNamed(named) + l.PushBackNamed(named) + l.PushBackNamed(named2) + l.PushBack(func(r *request.Request) {}) + assert.Equal(t, 4, l.Len()) + l.Remove(named) + assert.Equal(t, 2, l.Len()) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000000000..3735d7fa533a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,279 @@ +package request + +import ( + "bytes" + "fmt" + "io" + 
"io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = ioutil.NopCloser(reader) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. 
+func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + r.built = true + } + + return r.Error +} + +// Sign will sign the request retuning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +func (r *Request) Send() error { + for { + r.Sign() + if r.Error != nil { + return r.Error + } + + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // Re-seek the body back to the original point in for a retry so that + // send will send the body's contents again in the upcoming request. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
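+//
+// For example (illustrative values): if the current User-Agent header is
+// "aws-sdk-go/0.10.4", then AddToUserAgent(r, "extra/1.0") sets it to
+// "aws-sdk-go/0.10.4 extra/1.0".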
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 0000000000000..b19fbd70b4ab0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,96 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + } + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. 
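+//
+// A usage sketch mirroring the pagination tests in this package (the DynamoDB
+// client db and the fmt import are assumed for illustration):
+//
+//    req, _ := db.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(2)})
+//    err := req.EachPage(func(p interface{}, last bool) bool {
+//        for _, name := range p.(*dynamodb.ListTablesOutput).TableNames {
+//            fmt.Println(*name)
+//        }
+//        return true // return false to stop iterating early
+//    })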
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + page.Send() + shouldContinue := fn(page.Data, !page.HasNextPage()) + if page.Error != nil || !shouldContinue { + return page.Error + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go new file mode 100644 index 0000000000000..a9863eaf97f10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go @@ -0,0 +1,392 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" +) + +// Use DynamoDB methods for simplicity +func TestPaginationQueryPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false + + reqNum := 0 + resps := []*dynamodb.QueryOutput{ + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + map[string]*dynamodb.AttributeValue{ + "key": {S: aws.String("key1")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + map[string]*dynamodb.AttributeValue{ + "key": {S: aws.String("key2")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + map[string]*dynamodb.AttributeValue{ + "key": {S: aws.String("key3")}, + }, + }, + }, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.QueryInput) + if in == nil { + tokens = append(tokens, nil) + } else if len(in.ExclusiveStartKey) != 0 { + tokens = append(tokens, in.ExclusiveStartKey) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.QueryInput{ + Limit: aws.Int64(2), + TableName: aws.String("tablename"), + } + err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool { + numPages++ + for _, item := range p.Items { + pages = append(pages, item) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + assert.Nil(t, err) + + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + }, tokens) + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key3")}}, + }, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, params.ExclusiveStartKey) +} + +// Use DynamoDB methods for 
simplicity +func TestPagination(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + for _, t := range p.TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) + assert.Nil(t, params.ExclusiveStartTableName) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEachPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + req, _ := db.ListTablesRequest(params) + err := req.EachPage(func(p interface{}, last bool) bool { + numPages++ + for _, t := range p.(*dynamodb.ListTablesOutput).TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEarlyExit(t *testing.T) { + db := dynamodb.New(unit.Session) + numPages, gotToEnd := 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: 
[]*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + if numPages == 2 { + return false + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, 2, numPages) + assert.False(t, gotToEnd) + assert.Nil(t, err) +} + +func TestSkipPagination(t *testing.T) { + client := s3.New(unit.Session) + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = &s3.HeadBucketOutput{} + }) + + req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) + + numPages, gotToEnd := 0, false + req.EachPage(func(p interface{}, last bool) bool { + numPages++ + if last { + gotToEnd = true + } + return true + }) + assert.Equal(t, 1, numPages) + assert.True(t, gotToEnd) +} + +// Use S3 for simplicity +func TestPaginationTruncation(t *testing.T) { + client := s3.New(unit.Session) + + reqNum := 0 + resps := []*s3.ListObjectsOutput{ + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, + {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, + } + + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} + + results := []string{} + err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results) + assert.Nil(t, err) + + // Try again without truncation token at all + reqNum = 0 + resps[1].IsTruncated = nil + resps[2].IsTruncated = aws.Bool(true) + results = []string{} + err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2"}, results) + assert.Nil(t, err) + +} + +// Benchmarks +var benchResps = []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, 
LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE")}}, +} + +var benchDb = func() *dynamodb.DynamoDB { + db := dynamodb.New(unit.Session) + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + return db +} + +func BenchmarkCodegenIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error { + page, _ := db.ListTablesRequest(input) + for ; page != nil; page = page.NextPage() { + page.Send() + out := page.Data.(*dynamodb.ListTablesOutput) + if result := fn(out, !page.HasNextPage()); page.Error != nil || !result { + return page.Error + } + } + return nil + } + + for i := 0; i < b.N; i++ { + reqNum = 0 + iter(func(p *dynamodb.ListTablesOutput, last bool) bool { + return true + }) + } +} + +func BenchmarkEachPageIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + for i := 0; i < b.N; i++ { + reqNum = 0 + req, _ := db.ListTablesRequest(input) + req.EachPage(func(p interface{}, last bool) bool { + return true + }) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go new file mode 100644 index 0000000000000..4828dff7fda01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go @@ -0,0 +1,261 @@ +package request_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +type testData struct { + Data string +} + +func body(str string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(str))) +} + +func unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.Data != nil { + json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) + } + return +} + +func unmarshalError(req *request.Request) { + 
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err) + return + } + req.Error = awserr.NewRequestFailure( + awserr.New(jsonErr.Code, jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + "", + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} + +// test that retries occur for 5xx status codes +func TestRequestRecoverRetry5xx(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry` +func TestRequestRecoverRetry4xxRetryable(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)}, + {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries don't occur for 4xx status codes with a response type that can't be retried +func TestRequest4xxUnretryable(t *testing.T) { + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 401, e.StatusCode()) + } else { + assert.Fail(t, 
"Expected error to be a service failure") + } + assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code()) + assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message()) + assert.Equal(t, 0, int(r.RetryCount)) +} + +func TestRequestExhaustRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay := func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 500, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "UnknownError", err.(awserr.Error).Code()) + assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) + assert.Equal(t, 3, int(r.RetryCount)) + + expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}} + for i, v := range delays { + min := expectDelays[i].min * time.Millisecond + max := expectDelays[i].max * time.Millisecond + assert.True(t, min <= v && v <= max, + "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) + } +} + +// test that the request is retried after the credentials are expired. 
+func TestRequestRecoverExpiredCreds(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")}) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + + credExpiredBeforeRetry := false + credExpiredAfterRetry := false + + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + credExpiredAfterRetry = r.Config.Credentials.IsExpired() + }) + + s.Handlers.Sign.Clear() + s.Handlers.Sign.PushBack(func(r *request.Request) { + r.Config.Credentials.Get() + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + + assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check") + assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check") + assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery") + + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestMakeAddtoUserAgentHandler(t *testing.T) { + fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) { + fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestRequestUserAgent(t *testing.T) { + s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) + // s.Handlers.Validate.Clear() + + req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) + req.HTTPRequest.Header.Set("User-Agent", "foo/bar") + assert.NoError(t, req.Build()) + + expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", + aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent")) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 0000000000000..b06143b835879 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,74 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. 
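+//
+// An illustrative sketch of a custom implementation (the type name and the
+// values are hypothetical, not part of the SDK):
+//
+//    type fixedRetryer struct{}
+//
+//    func (fixedRetryer) RetryRules(*Request) time.Duration { return 100 * time.Millisecond }
+//    func (fixedRetryer) ShouldRetry(r *Request) bool       { return r.IsErrorRetryable() }
+//    func (fixedRetryer) MaxRetries() int                   { return 3 }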
+type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 0000000000000..eb7fc2052f1ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,105 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +package session + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. +// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. 
+// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + def := defaults.Get() + s := &Session{ + Config: def.Config, + Handlers: def.Handlers, + } + s.Config.MergeIn(cfgs...) + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2"}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
+ endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go new file mode 100644 index 0000000000000..e56c02fc6618e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go @@ -0,0 +1,20 @@ +package session_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +func TestNewDefaultSession(t *testing.T) { + s := session.New(&aws.Config{Region: aws.String("region")}) + + assert.Equal(t, "region", *s.Config.Region) + assert.Equal(t, http.DefaultClient, s.Config.HTTPClient) + assert.NotNil(t, s.Config.Logger) + assert.Equal(t, aws.LogOff, *s.Config.LogLevel) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000000000..0f067c57f4e21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,88 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. 
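+//
+// For example (illustrative values), writes may arrive out of order and the
+// buffer grows as needed:
+//
+//    b := &WriteAtBuffer{}
+//    b.WriteAt([]byte("world"), 5)
+//    b.WriteAt([]byte("hello"), 0)
+//    // b.Bytes() is now []byte("helloworld")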
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + expLen := pos + int64(len(p)) + if int64(len(b.buf)) < expLen { + newBuf := make([]byte, expLen) + copy(newBuf, b.buf) + b.buf = newBuf + } + copy(b.buf[pos:], p) + return len(p), nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go new file mode 100644 index 0000000000000..a4ed20e7d2202 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go @@ -0,0 +1,56 @@ +package aws + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteAtBuffer(t *testing.T) { + b := &WriteAtBuffer{} + + n, err := b.WriteAt([]byte{1}, 0) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + n, err = b.WriteAt([]byte{1, 1, 1}, 5) + assert.NoError(t, err) + assert.Equal(t, 3, n) + + n, err = b.WriteAt([]byte{2}, 1) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + n, err = b.WriteAt([]byte{3}, 2) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes()) +} + +func BenchmarkWriteAtBuffer(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } +} + +func BenchmarkWriteAtBufferParallel(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } + }) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 0000000000000..f4741adfb4478 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "0.10.4" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go new file mode 100644 index 0000000000000..d040cccd57d36 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go @@ -0,0 +1,31 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import "strings" + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. 
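+//
+// For example (based on the generated endpoints map in this package):
+//
+//    ep, sr := EndpointForRegion("iam", "us-west-2")
+//    // ep == "iam.amazonaws.com", sr == "us-east-1" (resolved via the "*/iam" entry)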
+func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + return + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json new file mode 100644 index 0000000000000..4c588090a9c57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json @@ -0,0 +1,77 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go new file mode 100644 index 0000000000000..894c1a6434198 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go @@ -0,0 +1,89 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go new file mode 100644 index 0000000000000..8af65879d406e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go @@ -0,0 +1,28 @@ +package endpoints + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"} + + for _, name := range svcs { + ep, sr := EndpointForRegion(name, region) + assert.Equal(t, name+".amazonaws.com", ep) + assert.Equal(t, "us-east-1", sr) + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"} + + for _, name := range svcs { + ep, _ := EndpointForRegion(name, region) + assert.Equal(t, name+"."+region+".amazonaws.com.cn", ep) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go new file mode 100644 index 0000000000000..e3d4147ee33eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go @@ -0,0 +1,32 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. 
+package ec2query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil" +) + +// Build builds a request for the EC2 protocol. +func Build(r *aws.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.Service.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build_test.go new file mode 100644 index 0000000000000..7973dd3baec89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build_test.go @@ -0,0 +1,860 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/internal/protocol/ec2query" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/internal/signer/v4" + "github.com/aws/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF + +type InputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService1ProtocolTest client. +func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice1protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService1ProtocolTest{service} +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + Bar *string `type:"string"` + + Foo *string `type:"string"` + + metadataInputService1TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService2ProtocolTest client. +func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice2protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService2ProtocolTest{service} +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputShape struct { + Bar *string `locationName:"barLocationName" type:"string"` + + Foo *string `type:"string"` + + Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"` + + metadataInputService2TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService3ProtocolTest client. +func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice3protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService3ProtocolTest{service} +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"` + + metadataInputService3TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeStructType struct { + ScalarArg *string `locationName:"Scalar" type:"string"` + + metadataInputService3TestShapeStructType `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService4ProtocolTest client. +func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice4protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService4ProtocolTest{service} +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + ListArg []*string `type:"list"` + + metadataInputService4TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService5ProtocolTest client. +func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice5protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService5ProtocolTest{service} +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"` + + metadataInputService5TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService6ProtocolTest client. +func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice6protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService6ProtocolTest{service} +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6TestShapeInputShape struct { + ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"` + + metadataInputService6TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService7ProtocolTest client. +func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice7protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService7ProtocolTest{service} +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputShape struct { + BlobArg []byte `type:"blob"` + + metadataInputService7TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService8ProtocolTest client. +func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice8protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &InputService8ProtocolTest{service} +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputShape struct { + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataInputService8TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewInputService1ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) { + svc := NewInputService2ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService2TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + Yuck: aws.String("val3"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { + svc := NewInputService3ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService3TestShapeInputShape{ + StructArg: &InputService3TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`), 
util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { + svc := NewInputService4ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) { + svc := NewInputService5ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService5TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) { + svc := NewInputService6ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService6TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + svc := NewInputService7ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService7TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + 
+ // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go new file mode 100644 index 0000000000000..e59b2bbac3e27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go @@ -0,0 +1,54 @@ +package ec2query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response body for the EC2 protocol. +func Unmarshal(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals response headers for the EC2 protocol. +func UnmarshalMeta(r *aws.Request) { + // TODO implement unmarshaling of request IDs +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Code string `xml:"Errors>Error>Code"` + Message string `xml:"Errors>Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals a response error for the EC2 protocol. +func UnmarshalError(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal_test.go new file mode 100644 index 0000000000000..a4527c14e7ed3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal_test.go @@ -0,0 +1,816 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/internal/protocol/ec2query" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/internal/signer/v4" + "github.com/aws/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF + +type OutputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService1ProtocolTest client. 
+func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice1protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService1ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService2ProtocolTest client. +func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice2protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService2ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputShape struct { + Blob []byte `type:"blob"` + + metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService3ProtocolTest client. +func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice3protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService3ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputShape struct { + ListMember []*string `type:"list"` + + metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService4ProtocolTest client. +func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice4protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService4ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService5ProtocolTest client. +func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice5protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService5ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputShape struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService6ProtocolTest client. +func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice6protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService6ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputShape struct { + Map map[string]*OutputService6TestShapeStructureType `type:"map"` + + metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeStructureType struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService6TestShapeStructureType `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeStructureType struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService7ProtocolTest client. +func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice7protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService7ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputShape struct { + Map map[string]*string `type:"map" flattened:"true"` + + metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService8ProtocolTest client. +func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice8protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(ec2query.Build) + service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return &OutputService8ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) {
+	op := &aws.Operation{
+		Name: opOutputService8TestCaseOperation1,
+	}
+
+	if input == nil {
+		input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &OutputService8TestShapeOutputShape{}
+	req.Data = output
+	return
+}
+
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) {
+	req, out := c.OutputService8TestCaseOperation1Request(input)
+	err := req.Send()
+	return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+	metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputShape struct {
+	Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
+
+	metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputShape struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+	svc := NewOutputService1ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><RequestId>request-id</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService1TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "a", *out.Char)
+	assert.Equal(t, 1.3, *out.Double)
+	assert.Equal(t, false, *out.FalseBool)
+	assert.Equal(t, 1.2, *out.Float)
+	assert.Equal(t, int64(200), *out.Long)
+	assert.Equal(t, int64(123), *out.Num)
+	assert.Equal(t, "myname", *out.Str)
+	assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
+	svc := NewOutputService2ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Blob>dmFsdWU=</Blob><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService2TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
+	svc := NewOutputService3ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><member>abc</member><member>123</member></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService3TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+	svc := NewOutputService4ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><item>abc</item><item>123</item></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService4TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
+	svc := NewOutputService5ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember>abc</ListMember><ListMember>123</ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService5TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "abc", *out.ListMember[0])
+	assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
+	svc := NewOutputService6ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService6TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"].Foo)
+	assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
+	svc := NewOutputService7ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService7TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"])
+	assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
+	svc := NewOutputService8ProtocolTest(nil)
+
+	buf := bytes.NewReader([]byte("<OperationNameResponse><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
+	req, out := svc.OutputService8TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	ec2query.UnmarshalMeta(req)
+	ec2query.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "bam", *out.Map["baz"])
+	assert.Equal(t, "bar", *out.Map["qux"])
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go
new file mode 100644
index 0000000000000..c4d8dd2635ac2
--- /dev/null
+++ 
b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go @@ -0,0 +1,33 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil" +) + +// Build builds a request for an AWS Query service. +func Build(r *aws.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.Service.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go new file mode 100644 index 0000000000000..52bbf7e1c87b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go @@ -0,0 +1,1482 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/internal/protocol/query" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/internal/signer/v4" + "github.com/aws/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF + +type InputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService1ProtocolTest client. +func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice1protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService1ProtocolTest{service} +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + Bar *string `type:"string"` + + Foo *string `type:"string"` + + metadataInputService1TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService2ProtocolTest client. +func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice2protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService2ProtocolTest{service} +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputShape struct { + StructArg *InputService2TestShapeStructType `type:"structure"` + + metadataInputService2TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeStructType struct { + ScalarArg *string `type:"string"` + + metadataInputService2TestShapeStructType `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService3ProtocolTest client. +func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice3protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService3ProtocolTest{service} +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &aws.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + ListArg []*string `type:"list"` + + metadataInputService3TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService4ProtocolTest client. +func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice4protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService4ProtocolTest{service} +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService4TestCaseOperation2 = "OperationName" + +// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) { + op := &aws.Operation{ + Name: opInputService4TestCaseOperation2, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) { + req, out := c.InputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation2Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + ListArg []*string `type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` + + metadataInputService4TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService5ProtocolTest client. 
+func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice5protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService5ProtocolTest{service} +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation2 = "OperationName" + +// InputService5TestCaseOperation2Request generates a request for the InputService5TestCaseOperation2 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) { + op := &aws.Operation{ + Name: opInputService5TestCaseOperation2, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) { + req, out := c.InputService5TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation2Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + MapArg map[string]*string `type:"map"` + + metadataInputService5TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService6ProtocolTest client. +func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice6protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService6ProtocolTest{service} +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6TestShapeInputShape struct { + MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"` + + metadataInputService6TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService7ProtocolTest client. +func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice7protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService7ProtocolTest{service} +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputShape struct { + BlobArg []byte `type:"blob"` + + metadataInputService7TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService8ProtocolTest client. +func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice8protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService8ProtocolTest{service} +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputShape struct { + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataInputService8TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9ProtocolTest struct { + *aws.Service +} + +// New returns a new InputService9ProtocolTest client. +func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "inputservice9protocoltest", + APIVersion: "2014-01-01", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &InputService9ProtocolTest{service} +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation1, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation2 = "OperationName" + +// InputService9TestCaseOperation2Request generates a request for the InputService9TestCaseOperation2 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation2Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation2Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation2, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation2(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation2Output, error) { + req, out := c.InputService9TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation3 = "OperationName" + +// InputService9TestCaseOperation3Request generates a request for the InputService9TestCaseOperation3 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation3Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation3Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation3, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation3(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation3Output, error) { + req, out := c.InputService9TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation4 = "OperationName" + +// InputService9TestCaseOperation4Request generates a request for the InputService9TestCaseOperation4 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation4Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation4Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation4, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation4(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation4Output, error) { + req, out := c.InputService9TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation5 = "OperationName" + +// InputService9TestCaseOperation5Request generates a request for the InputService9TestCaseOperation5 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation5Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation5Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation5, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation5(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation5Output, error) { + req, out := c.InputService9TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation6 = "OperationName" + +// InputService9TestCaseOperation6Request generates a request for the InputService9TestCaseOperation6 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation6Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation6Output) { + op := &aws.Operation{ + Name: opInputService9TestCaseOperation6, + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation6(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation6Output, error) { + req, out := c.InputService9TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation2Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation3Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation3Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation3Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation4Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation4Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation4Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation5Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation5Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation5Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation6Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation6Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation6Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type 
InputService9TestShapeInputShape struct { + RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"` + + metadataInputService9TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeRecursiveStructType struct { + NoRecurse *string `type:"string"` + + RecursiveList []*InputService9TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService9TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"` + + metadataInputService9TestShapeRecursiveStructType `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeRecursiveStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewInputService1ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + svc := NewInputService2ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService2TestShapeInputShape{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + svc := NewInputService3ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { + svc := NewInputService3ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{}, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, 
util.Trim(`Action=OperationName&ListArg=&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + svc := NewInputService4ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { + svc := NewInputService4ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{}, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&ListArg=&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeMapTypeCase1(t *testing.T) { + svc := NewInputService5ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService5TestShapeInputShape{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeMapTypeCase2(t *testing.T) { + svc := NewInputService5ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService5TestShapeInputShape{ + MapArg: map[string]*string{}, + } + req, _ := svc.InputService5TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&MapArg=&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { + svc := NewInputService6ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService6TestShapeInputShape{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := 
svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + svc := NewInputService7ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService7TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + svc := NewInputService8ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService8TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestRecursiveShapesCase1(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestRecursiveShapesCase2(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService9TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func 
TestInputService9ProtocolTestRecursiveShapesCase3(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService9TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestRecursiveShapesCase4(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveList: []*InputService9TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService9TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestRecursiveShapesCase5(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveList: []*InputService9TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService9TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestRecursiveShapesCase6(t *testing.T) { + svc := NewInputService9ProtocolTest(nil) + svc.Endpoint = "https://test" + + input := &InputService9TestShapeInputShape{ + RecursiveStruct: &InputService9TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService9TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService9TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + 
query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=bar&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=bar&RecursiveStruct.RecursiveMap.entry.2.key=foo&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) + + // assert URL + assert.Equal(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000000000..3b417a89f7183 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go @@ -0,0 +1,223 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + value := elemOf(value.Field(i)) + field := t.Field(i) + var name string + + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, value, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go new file mode 100644 index 0000000000000..e8cfa926b656c --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go @@ -0,0 +1,29 @@ +package query + +//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *aws.Request) { + // TODO implement unmarshaling of request IDs +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go new file mode 100644 index 0000000000000..d88ee33580504 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go @@ -0,0 +1,33 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals an error response for an AWS Query service. 
+func UnmarshalError(r *aws.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go new file mode 100644 index 0000000000000..a44060ce8feb1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go @@ -0,0 +1,1418 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/internal/protocol/query" + "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/internal/signer/v4" + "github.com/aws/aws-sdk-go/internal/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF + +type OutputService1ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService1ProtocolTest client. +func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice1protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService1ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService2ProtocolTest client. +func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice2protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService2ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputShape struct { + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService3ProtocolTest client. +func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice3protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService3ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputShape struct { + Blob []byte `type:"blob"` + + metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService4ProtocolTest client. +func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice4protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService4ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + ListMember []*string `type:"list"` + + metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService5ProtocolTest client. +func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice5protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService5ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputShape struct { + ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService6ProtocolTest client. +func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice6protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService6ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputShape struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService7ProtocolTest client. +func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice7protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService7ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputShape struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService8ProtocolTest client. +func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice8protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService8ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeOutputShape struct { + List []*OutputService8TestShapeStructureShape `type:"list"` + + metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService8TestShapeStructureShape `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService9ProtocolTest client. +func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice9protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService9ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputShape, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeOutputShape struct { + List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` + + metadataOutputService9TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService9TestShapeStructureShape `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService10ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService10ProtocolTest client. +func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice10protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService10ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputShape, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService10TestShapeOutputShape struct { + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` + + metadataOutputService10TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService10TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService11ProtocolTest client. +func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice11protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService11ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputShape, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11TestShapeOutputShape struct { + Map map[string]*OutputService11TestShapeStructType `type:"map"` + + metadataOutputService11TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11TestShapeStructType struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService11TestShapeStructType `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService12ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService12ProtocolTest client. +func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice12protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService12ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputShape, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService12TestShapeOutputShape struct { + Map map[string]*string `type:"map" flattened:"true"` + + metadataOutputService12TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService12TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService13ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService13ProtocolTest client. +func NewOutputService13ProtocolTest(config *aws.Config) *OutputService13ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice13protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService13ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService13TestCaseOperation1 = "OperationName" + +// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. 
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *aws.Request, output *OutputService13TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService13TestCaseOperation1, + } + + if input == nil { + input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService13TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputShape, error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService13TestShapeOutputShape struct { + Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + metadataOutputService13TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService13TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService14ProtocolTest struct { + *aws.Service +} + +// New returns a new OutputService14ProtocolTest client. +func NewOutputService14ProtocolTest(config *aws.Config) *OutputService14ProtocolTest { + service := &aws.Service{ + Config: aws.DefaultConfig.Merge(config), + ServiceName: "outputservice14protocoltest", + APIVersion: "", + } + service.Initialize() + + // Handlers + service.Handlers.Sign.PushBack(v4.Sign) + service.Handlers.Build.PushBack(query.Build) + service.Handlers.Unmarshal.PushBack(query.Unmarshal) + service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return &OutputService14ProtocolTest{service} +} + +// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := aws.NewRequest(c.Service, op, params, data) + + return req +} + +const opOutputService14TestCaseOperation1 = "OperationName" + +// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. 
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *aws.Request, output *OutputService14TestShapeOutputShape) { + op := &aws.Operation{ + Name: opOutputService14TestCaseOperation1, + } + + if input == nil { + input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService14TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputShape, error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService14TestShapeOutputShape struct { + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` + + metadataOutputService14TestShapeOutputShape `json:"-" xml:"-"` +} + +type metadataOutputService14TestShapeOutputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + svc := NewOutputService1ProtocolTest(nil) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + svc := NewOutputService2ProtocolTest(nil) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + svc := NewOutputService3ProtocolTest(nil) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + 
assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + svc := NewOutputService4ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + svc := NewOutputService5ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + svc := NewOutputService6ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { + svc := NewOutputService7ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + svc := NewOutputService8ProtocolTest(nil) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func 
TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + svc := NewOutputService9ProtocolTest(nil) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + svc := NewOutputService10ProtocolTest(nil) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + svc := NewOutputService11ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + svc := NewOutputService12ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + svc := NewOutputService13ProtocolTest(nil) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + svc := NewOutputService14ProtocolTest(nil) + + buf := 
bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go new file mode 100644 index 0000000000000..8917ad7a9bc5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go @@ -0,0 +1,217 @@ +// Package rest provides RESTful serialisation of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// Build builds the REST component of a service request. +func Build(r *aws.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *aws.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + switch field.Tag.Get("location") { + case "headers": // header maps + buildHeaderMap(r, m, field.Tag.Get("locationName")) + case "header": + buildHeader(r, m, name) + case "uri": + buildURI(r, m, name) + case "querystring": + buildQueryString(r, m, name, query) + } + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *aws.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(r *aws.Request, v reflect.Value, name 
string) { + str, err := convertType(v) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode REST request", err) + } else if str != nil { + r.HTTPRequest.Header.Add(name, *str) + } +} + +func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode REST request", err) + } else if str != nil { + r.HTTPRequest.Header.Add(prefix+key.String(), *str) + } + } +} + +func buildURI(r *aws.Request, v reflect.Value, name string) { + value, err := convertType(v) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode REST request", err) + } else if value != nil { + uri := r.HTTPRequest.URL.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1) + r.HTTPRequest.URL.Path = uri + } +} + +func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) { + str, err := convertType(v) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode REST request", err) + } else if str != nil { + query.Set(name, *str) + } +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (*string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return nil, nil + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return nil, err + } + return &str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go new file mode 100644 index 0000000000000..1f603bb719fef --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go new file mode 100644 index 0000000000000..a4155f1669999 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go @@ -0,0 +1,174 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *aws.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *aws.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *aws.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, 
r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go new file mode 100644 index 0000000000000..d3db250231b59 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go @@ -0,0 +1,287 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + member := elemOf(value.Field(i)) + field := t.Field(i) + mTag := field.Tag + + if mTag.Get("location") != "" { // skip non-body members + continue + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. 
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. +// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000000000..5e4fe210b3698 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("SDKShapeTraits"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. 
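+// Supported destinations are *string, []byte (base64-decoded), *bool, *int64, *float64
+// and *time.Time (parsed as ISO 8601 UTC).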
+// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 0000000000000..72c198a9d8d00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
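+// When sorted is true, children are emitted in lexical order of their element names so the
+// output is deterministic (useful for tests); otherwise Go's map iteration order is used.
+// Text content, when present, takes precedence over child elements.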
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go new file mode 100644 index 0000000000000..fbb0e41cdc152 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go @@ -0,0 +1,43 @@ +package v4_test + +import ( + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/internal/test/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +var _ = unit.Imported + +func TestPresignHandler(t *testing.T) { + svc := s3.New(nil) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, err := req.Presign(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "host;x-amz-acl" + expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go new file mode 100644 index 0000000000000..748c37f2e65d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go @@ -0,0 +1,364 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + 
formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *aws.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Service.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.Service.SigningRegion + if region == "" { + region = aws.StringValue(req.Service.Config.Region) + } + + name := req.Service.SigningName + if name == "" { + name = req.Service.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Service.Config.Credentials, + Debug: req.Service.Config.LogLevel.Value(), + Logger: req.Service.Config.Logger, + } + + req.Error = s.sign() +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. 
+ v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + if v4.isPresign { + v4.buildQuery() // no depends + } + v4.buildCanonicalHeaders() // depends on cred string + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func (v4 *signer) buildQuery() { + for k, h := range v4.Request.Header { + if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { + continue // never hoist x-amz-* headers, they must be signed + } + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // never hoist ignored headers + } + + v4.Request.Header.Del(k) + v4.Query.Del(k) + for _, v := range h { + v4.Query.Add(k, v) + } + } +} + +func (v4 *signer) buildCanonicalHeaders() { + var headers []string + headers = append(headers, "host") + for k := range v4.Request.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := 
make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
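+// In practice this means deleting the X-Amz-Algorithm, X-Amz-Signature, X-Amz-Date,
+// X-Amz-Expires, X-Amz-Credential, X-Amz-SignedHeaders and X-Amz-Security-Token query
+// values so the request can be presigned again from a clean state.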
+func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go new file mode 100644 index 0000000000000..adf8e7bd89e5a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go @@ -0,0 +1,245 @@ +package v4 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/stretchr/testify/assert" +) + +func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { + endpoint := "https://" + serviceName + "." + region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") + + return signer{ + Request: req, + Time: signTime, + ExpireTime: expireTime, + Query: req.URL.Query(), + Body: reader, + ServiceName: serviceName, + Region: region, + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedHeaders := "host;x-amz-meta-other-header;x-amz-target" + expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + + q := signer.Request.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e" 
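+	// A header-signed request carries the credential scope, signed-header list and hex
+	// signature in a single Authorization value rather than in query parameters.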
+ + q := signer.Request.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignEmptyBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "") + signer.Body = nil + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash) +} + +func TestSignBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) +} + +func TestSignSeekedBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello") + signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello" + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) + + start, _ := signer.Body.Seek(0, 1) + assert.Equal(t, int64(3), start) +} + +func TestPresignEmptyBodyS3(t *testing.T) { + signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "UNSIGNED-PAYLOAD", hash) +} + +func TestSignPrecomputedBodyChecksum(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "PRECOMPUTED", hash) +} + +func TestAnonymousCredentials(t *testing.T) { + r := aws.NewRequest( + aws.NewService(&aws.Config{Credentials: credentials.AnonymousCredentials}), + &aws.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + + urlQ := r.HTTPRequest.URL.Query() + assert.Empty(t, urlQ.Get("X-Amz-Signature")) + assert.Empty(t, urlQ.Get("X-Amz-Credential")) + assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) + assert.Empty(t, urlQ.Get("X-Amz-Date")) + + hQ := r.HTTPRequest.Header + assert.Empty(t, hQ.Get("Authorization")) + assert.Empty(t, hQ.Get("X-Amz-Date")) +} + +func TestIgnoreResignRequestWithValidCreds(t *testing.T) { + r := aws.NewRequest( + aws.NewService(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }), + &aws.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + + Sign(r) + sig := r.HTTPRequest.Header.Get("Authorization") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { + r := aws.NewRequest( + aws.NewService(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }), + &aws.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + sig := r.HTTPRequest.Header.Get("X-Amz-Signature") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) +} + +func TestResignRequestExpiredCreds(t *testing.T) { + creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + r := aws.NewRequest( + aws.NewService(&aws.Config{Credentials: creds}), + 
&aws.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + querySig := r.HTTPRequest.Header.Get("Authorization") + + creds.Expire() + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestPreResignRequestExpiredCreds(t *testing.T) { + provider := &credentials.StaticProvider{credentials.Value{"AKID", "SECRET", "SESSION"}} + creds := credentials.NewCredentials(provider) + r := aws.NewRequest( + aws.NewService(&aws.Config{Credentials: creds}), + &aws.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature") + + creds.Expire() + r.Time = time.Now().Add(time.Hour * 48) + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature")) +} + +func BenchmarkPresignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} + +func BenchmarkSignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 0000000000000..2b279e65999ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. 
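+//
+// For example, "dynamodb.us-east-1.amazonaws.com" (an illustrative host) becomes
+// "https://dynamodb.us-east-1.amazonaws.com", or "http://..." when disableSSL is true;
+// an endpoint that already carries a scheme is returned unchanged.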
+func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 0000000000000..ea819b1ec3594 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,89 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 0000000000000..3fab91c7f1978 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,101 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
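+// It is produced from endpoints.json by the go:generate directives in endpoints.go.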
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go new file mode 100644 index 0000000000000..2add48890b3c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go @@ -0,0 +1,41 @@ +package endpoints_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func TestGenericEndpoint(t *testing.T) { + name := "service" + region := "mock-region-1" + + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep) + assert.Empty(t, sr) +} + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep) + assert.Equal(t, "us-east-1", sr) + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep) + 
assert.Empty(t, sr) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go new file mode 100644 index 0000000000000..0ead0126ee8be --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -0,0 +1,32 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// Build builds a request for the EC2 protocol. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go new file mode 100644 index 0000000000000..e135b93601dea --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go @@ -0,0 +1,85 @@ +// +build bench + +package ec2query_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) { + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... 
+ }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + + benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params) +} + +func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) { + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + + benchEC2QueryBuild(b, "AttachNetworkInterface", params) +} + +func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "ec2" + svc.APIVersion = "2015-04-15" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{ + Name: opName, + HTTPMethod: "POST", + HTTPPath: "/", + }, params, nil) + ec2query.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go new file mode 100644 index 0000000000000..887feeb1ea872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go @@ -0,0 +1,1051 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
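+// The handler lists installed below wire up AWS Signature Version 4 signing together with
+// the ec2query build, unmarshal and error-unmarshal steps that these protocol tests exercise.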
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + Bar *string `type:"string"` + + Foo *string `type:"string"` + + metadataInputService1TestShapeInputService1TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. 
+// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + Bar *string `locationName:"barLocationName" type:"string"` + + Foo *string `type:"string"` + + Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"` + + metadataInputService2TestShapeInputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"` + + metadataInputService3TestShapeInputService3TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeStructType struct { + ScalarArg *string `locationName:"Scalar" type:"string"` + + metadataInputService3TestShapeStructType `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + ListArg []*string `type:"list"` + + metadataInputService4TestShapeInputService4TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"` + + metadataInputService5TestShapeInputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"` + + metadataInputService6TestShapeInputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + BlobArg []byte `type:"blob"` + + metadataInputService7TestShapeInputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataInputService8TestShapeInputService8TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func 
TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + Yuck: aws.String("val3"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + StructArg: &InputService3TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) { + sess := session.New() + svc := 
NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go new file mode 100644 index 0000000000000..658190f70512a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -0,0 +1,54 @@ +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response body for the EC2 protocol. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals response headers for the EC2 protocol. 
+func UnmarshalMeta(r *request.Request) { + // TODO implement unmarshaling of request IDs +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Code string `xml:"Errors>Error>Code"` + Message string `xml:"Errors>Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals a response error for the EC2 protocol. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go new file mode 100644 index 0000000000000..c347c371c1095 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go @@ -0,0 +1,1132 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + Blob []byte `type:"blob"` + + metadataOutputService2TestShapeOutputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + ListMember []*string `type:"list"` + + metadataOutputService3TestShapeOutputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService4TestShapeOutputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService5TestShapeOutputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + Map map[string]*OutputService6TestShapeStructureType `type:"map"` + + metadataOutputService6TestShapeOutputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeStructureType struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService6TestShapeStructureType `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeStructureType struct { + 
SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + Map map[string]*string `type:"map" flattened:"true"` + + metadataOutputService7TestShapeOutputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` + + metadataOutputService8TestShapeOutputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + Foo *string `type:"string"` + + metadataOutputService9TestShapeOutputService9TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200arequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) 
+ assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // 
ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 0000000000000..2d78c35c2413f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,33 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// Build builds a request for an AWS Query service. 
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go new file mode 100644 index 0000000000000..535821b2fd421 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go @@ -0,0 +1,2139 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + metadataInputService1TestShapeInputService1TestCaseOperation3Output `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputService1TestCaseOperation3Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + Bar *string `type:"string"` + + Baz *bool `type:"boolean"` + + Foo *string `type:"string"` + + metadataInputService1TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService1TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + StructArg *InputService2TestShapeStructType `type:"structure"` + + metadataInputService2TestShapeInputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService2TestShapeStructType struct { + ScalarArg *string `type:"string"` + + metadataInputService2TestShapeStructType `json:"-" xml:"-"` +} + +type metadataInputService2TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + ListArg []*string `type:"list"` + + metadataInputService3TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService3TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService4TestCaseOperation2 = "OperationName" + +// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation2, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService4TestShapeInputService4TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) { + req, out := c.InputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation2Output struct { + metadataInputService4TestShapeInputService4TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputService4TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + ListArg []*string `type:"list" flattened:"true"` + + NamedListArg []*string `locationNameList:"Foo" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` + + metadataInputService4TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService4TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + MapArg map[string]*string `type:"map" flattened:"true"` + + metadataInputService5TestShapeInputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + ListArg []*string `locationNameList:"item" type:"list"` + + metadataInputService6TestShapeInputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + ListArg []*string `locationNameList:"ListArgLocation" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` + + metadataInputService7TestShapeInputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + MapArg map[string]*string `type:"map"` + + metadataInputService8TestShapeInputService8TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. 
+// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"` + + metadataInputService9TestShapeInputService9TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. 
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + BlobArg []byte `type:"blob"` + + metadataInputService10TestShapeInputService10TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService10TestShapeInputService10TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataInputService11TestShapeInputService11TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataInputService11TestShapeInputService11TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation3 = "OperationName" + +// InputService12TestCaseOperation3Request generates a request for the InputService12TestCaseOperation3 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation3, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { + req, out := c.InputService12TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation4 = "OperationName" + +// InputService12TestCaseOperation4Request generates a request for the InputService12TestCaseOperation4 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation4, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { + req, out := c.InputService12TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation5 = "OperationName" + +// InputService12TestCaseOperation5Request generates a request for the InputService12TestCaseOperation5 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation5, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { + req, out := c.InputService12TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation6 = "OperationName" + +// InputService12TestCaseOperation6Request generates a request for the InputService12TestCaseOperation6 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation6, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + output = &InputService12TestShapeInputService12TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { + req, out := c.InputService12TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation2Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation2Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation3Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation3Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation3Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation4Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation4Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation4Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation5Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation5Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation5Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation6Output struct { + metadataInputService12TestShapeInputService12TestCaseOperation6Output `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputService12TestCaseOperation6Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` + + metadataInputService12TestShapeInputShape `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeInputShape struct { + SDKShapeTraits bool `type:"structure"` +} + +type InputService12TestShapeRecursiveStructType struct { + NoRecurse *string `type:"string"` + + RecursiveList []*InputService12TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` + + metadataInputService12TestShapeRecursiveStructType `json:"-" xml:"-"` +} + +type metadataInputService12TestShapeRecursiveStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t 
*testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(true), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(false), + } + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, 
`Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService3TestShapeInputShape{ + ListArg: []*string{}, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService4TestShapeInputShape{ + NamedListArg: []*string{ + aws.String("a"), + }, + } + req, _ := svc.InputService4TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := 
NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t 
*testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: 
&InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + 
body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000000000..4afa4cf0eedd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,223 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + var name string + + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000000000..1fcab1d1a136e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,29 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + // TODO implement unmarshaling of request IDs +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 0000000000000..08609d9208810 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,33 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals an error response for an AWS Query service. 
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go new file mode 100644 index 0000000000000..fe2a58e365030 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go @@ -0,0 +1,1878 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = awstesting.GenerateAssertions +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` + + metadataOutputService1TestShapeOutputService1TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService1TestShapeOutputService1TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + metadataOutputService2TestShapeOutputService2TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService2TestShapeOutputService2TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + Blob []byte `type:"blob"` + + metadataOutputService3TestShapeOutputService3TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService3TestShapeOutputService3TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + ListMember []*string `type:"list"` + + metadataOutputService4TestShapeOutputService4TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService4TestShapeOutputService4TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + ListMember []*string `locationNameList:"item" type:"list"` + + metadataOutputService5TestShapeOutputService5TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService5TestShapeOutputService5TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService6TestShapeOutputService6TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService6TestShapeOutputService6TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + ListMember []*string `type:"list" flattened:"true"` + + metadataOutputService7TestShapeOutputService7TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService7TestShapeOutputService7TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + List []*OutputService8TestShapeStructureShape `type:"list"` + + metadataOutputService8TestShapeOutputService8TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeOutputService8TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService8TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService8TestShapeStructureShape `json:"-" xml:"-"` +} + +type metadataOutputService8TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` + + metadataOutputService9TestShapeOutputService9TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeOutputService9TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService9TestShapeStructureShape struct { + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` + + metadataOutputService9TestShapeStructureShape `json:"-" xml:"-"` +} + +type metadataOutputService9TestShapeStructureShape struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` + + metadataOutputService10TestShapeOutputService10TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService10TestShapeOutputService10TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + Map map[string]*OutputService11TestShapeStructType `type:"map"` + + metadataOutputService11TestShapeOutputService11TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeOutputService11TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService11TestShapeStructType struct { + Foo *string `locationName:"foo" type:"string"` + + metadataOutputService11TestShapeStructType `json:"-" xml:"-"` +} + +type metadataOutputService11TestShapeStructType struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + Map map[string]*string `type:"map" flattened:"true"` + + metadataOutputService12TestShapeOutputService12TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService12TestShapeOutputService12TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService13ProtocolTest client from just a session. 
+// svc := outputservice13protocoltest.New(mySession) +// +// // Create a OutputService13ProtocolTest client with additional configuration +// svc := outputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService13ProtocolTest { + c := p.ClientConfig("outputservice13protocoltest", cfgs...) + return newOutputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService13ProtocolTest { + svc := &OutputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService13TestCaseOperation1 = "OperationName" + +// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *request.Request, output *OutputService13TestShapeOutputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService13TestCaseOperation1, + } + + if input == nil { + input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService13TestShapeOutputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputService13TestCaseOperation1Output, error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Output struct { + Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + metadataOutputService13TestShapeOutputService13TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService13TestShapeOutputService13TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService14ProtocolTest client from just a session. +// svc := outputservice14protocoltest.New(mySession) +// +// // Create a OutputService14ProtocolTest client with additional configuration +// svc := outputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService14ProtocolTest { + c := p.ClientConfig("outputservice14protocoltest", cfgs...) + return newOutputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService14ProtocolTest { + svc := &OutputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService14TestCaseOperation1 = "OperationName" + +// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. 
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *request.Request, output *OutputService14TestShapeOutputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService14TestCaseOperation1, + } + + if input == nil { + input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService14TestShapeOutputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputService14TestCaseOperation1Output, error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Output struct { + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` + + metadataOutputService14TestShapeOutputService14TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService14TestShapeOutputService14TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService15ProtocolTest client from just a session. +// svc := outputservice15protocoltest.New(mySession) +// +// // Create a OutputService15ProtocolTest client with additional configuration +// svc := outputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService15ProtocolTest { + c := p.ClientConfig("outputservice15protocoltest", cfgs...) + return newOutputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService15ProtocolTest { + svc := &OutputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + return svc +} + +// newRequest creates a new request for a OutputService15ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService15TestCaseOperation1 = "OperationName" + +// OutputService15TestCaseOperation1Request generates a request for the OutputService15TestCaseOperation1 operation. +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1Request(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (req *request.Request, output *OutputService15TestShapeOutputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService15TestCaseOperation1, + } + + if input == nil { + input = &OutputService15TestShapeOutputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService15TestShapeOutputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (*OutputService15TestShapeOutputService15TestCaseOperation1Output, error) { + req, out := c.OutputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Input struct { + metadataOutputService15TestShapeOutputService15TestCaseOperation1Input `json:"-" xml:"-"` +} + +type metadataOutputService15TestShapeOutputService15TestCaseOperation1Input struct { + SDKShapeTraits bool `type:"structure"` +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Output struct { + Foo *string `type:"string"` + + metadataOutputService15TestShapeOutputService15TestCaseOperation1Output `json:"-" xml:"-"` +} + +type metadataOutputService15TestShapeOutputService15TestCaseOperation1Output struct { + SDKShapeTraits bool `type:"structure"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) 
+ + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is 
used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := 
NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService15TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 0000000000000..87352bc60defd --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,254 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet 
{ + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000000000..1f603bb719fef --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
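+//
+// Only a payload member whose field is tagged type:"structure" is returned; for
+// scalar, blob, or streaming payload members this function returns nil.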
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000000000..06d9accbacb18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,183 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field 
:= v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 0000000000000..d3db250231b59 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,287 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. 
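+//
+// A minimal usage sketch (the params value stands in for any marshalable shape):
+//
+//   var buf bytes.Buffer
+//   enc := xml.NewEncoder(&buf)
+//   if err := BuildXML(params, enc); err != nil {
+//       return err
+//   }
+//   enc.Flush() // buf now holds the serialized XML body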
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, false)
+ }
+ }
+ return nil
+}
+
+// elemOf returns the reflection element of a value, dereferencing it if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML.
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value according to its
+// specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
+// type is not provided, reflection will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
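+//
+// Unexported fields and members carrying a non-body location tag are skipped, and the
+// struct's element is only attached to current when at least one member was built.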
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + member := elemOf(value.Field(i)) + field := t.Field(i) + mTag := field.Tag + + if mTag.Get("location") != "" { // skip non-body members + continue + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000000000..5e4fe210b3698 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
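+//
+// Illustrative sketch (not part of the generated SDK): decoding an XML
+// response body into an output shape. `resp.Body` and `output` are assumed to
+// be an HTTP response body and a pointer to the matching result struct; the
+// wrapper argument names the element that wraps the result (the wrapper name
+// below is only an example).
+//
+//    decoder := xml.NewDecoder(resp.Body)
+//    if err := xmlutil.UnmarshalXML(output, decoder, "DescribeAlarmsResult"); err != nil {
+//        // handle deserialization failure
+//    }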
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("SDKShapeTraits"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 0000000000000..72c198a9d8d00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
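+//
+// Illustrative sketch (not part of the generated SDK): building a small
+// XMLNode tree by hand and writing it out. The element names and the use of
+// os.Stdout are arbitrary choices for the example.
+//
+//    root := NewXMLElement(xml.Name{Local: "Parent"})
+//    child := NewXMLElement(xml.Name{Local: "Child"})
+//    child.Text = "value"
+//    root.AddChild(child)
+//    enc := xml.NewEncoder(os.Stdout)
+//    _ = StructToXML(enc, root, true) // sorted=true gives deterministic child order, e.g. for tests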
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go new file mode 100644 index 0000000000000..51a26d6ade6d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go @@ -0,0 +1,42 @@ +package v4_test + +import ( + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestPresignHandler(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, err := req.Presign(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "host;x-amz-acl" + expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go new file mode 100644 index 0000000000000..dc176f312ba7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go @@ -0,0 +1,365 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string 
+ formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + } + + req.Error = s.sign() +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. 
+ v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + if v4.isPresign { + v4.buildQuery() // no depends + } + v4.buildCanonicalHeaders() // depends on cred string + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func (v4 *signer) buildQuery() { + for k, h := range v4.Request.Header { + if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { + continue // never hoist x-amz-* headers, they must be signed + } + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // never hoist ignored headers + } + + v4.Request.Header.Del(k) + v4.Query.Del(k) + for _, v := range h { + v4.Query.Add(k, v) + } + } +} + +func (v4 *signer) buildCanonicalHeaders() { + var headers []string + headers = append(headers, "host") + for k := range v4.Request.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := 
make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go new file mode 100644 index 0000000000000..f39414fcf91a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go @@ -0,0 +1,252 @@ +package v4 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { + endpoint := "https://" + serviceName + "." + region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") + + return signer{ + Request: req, + Time: signTime, + ExpireTime: expireTime, + Query: req.URL.Query(), + Body: reader, + ServiceName: serviceName, + Region: region, + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedHeaders := "host;x-amz-meta-other-header;x-amz-target" + expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + + q := signer.Request.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, 
SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e" + + q := signer.Request.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignEmptyBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "") + signer.Body = nil + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash) +} + +func TestSignBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) +} + +func TestSignSeekedBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello") + signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello" + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) + + start, _ := signer.Body.Seek(0, 1) + assert.Equal(t, int64(3), start) +} + +func TestPresignEmptyBodyS3(t *testing.T) { + signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "UNSIGNED-PAYLOAD", hash) +} + +func TestSignPrecomputedBodyChecksum(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "PRECOMPUTED", hash) +} + +func TestAnonymousCredentials(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + + urlQ := r.HTTPRequest.URL.Query() + assert.Empty(t, urlQ.Get("X-Amz-Signature")) + assert.Empty(t, urlQ.Get("X-Amz-Credential")) + assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) + assert.Empty(t, urlQ.Get("X-Amz-Date")) + + hQ := r.HTTPRequest.Header + assert.Empty(t, hQ.Get("Authorization")) + assert.Empty(t, hQ.Get("X-Amz-Date")) +} + +func TestIgnoreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + + Sign(r) + sig := r.HTTPRequest.Header.Get("Authorization") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + sig := r.HTTPRequest.Header.Get("X-Amz-Signature") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) 
+} + +func TestResignRequestExpiredCreds(t *testing.T) { + creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + querySig := r.HTTPRequest.Header.Get("Authorization") + + creds.Expire() + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestPreResignRequestExpiredCreds(t *testing.T) { + provider := &credentials.StaticProvider{Value: credentials.Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "SESSION", + }} + creds := credentials.NewCredentials(provider) + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature") + + creds.Expire() + r.Time = time.Now().Add(time.Hour * 48) + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature")) +} + +func BenchmarkPresignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} + +func BenchmarkSignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 0000000000000..335af91dc612a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,103 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
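+//
+// Illustrative sketch (not part of the generated SDK): a waiter that polls a
+// hypothetical DescribeThings operation until every Things[].State equals
+// "ready". The operation name, argument path, and expected value are made up
+// for the example; real waiters are generated per service.
+//
+//    w := Waiter{
+//        Client: svc,           // a generated service client
+//        Input:  describeInput, // the operation's input shape
+//        Config: Config{
+//            Operation:   "DescribeThings",
+//            Delay:       15, // seconds between attempts
+//            MaxAttempts: 40,
+//            Acceptors: []WaitAcceptor{
+//                {State: "success", Matcher: "pathAll", Argument: "Things[].State", Expected: "ready"},
+//            },
+//        },
+//    }
+//    err := w.Wait()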
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + if err := req.Send(); err != nil { + return err + } + + for _, a := range w.Acceptors { + result := false + switch a.Matcher { + case "pathAll": + if vals, _ := awsutil.ValuesAtPath(req.Data, a.Argument); req.Error == nil && vals != nil { + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + } + case "pathAny": + if vals, _ := awsutil.ValuesAtPath(req.Data, a.Argument); req.Error == nil && vals != nil { + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + } + + if result { + switch a.State { + case "success": + return nil // waiter completed + case "failure": + if req.Error == nil { + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), nil) + } + return req.Error // waiter failed + case "retry": + // do nothing, just retry + } + break + } + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go new file mode 100644 index 0000000000000..58267295b117d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go @@ -0,0 +1,183 @@ +package waiter_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/waiter" +) + +type mockClient struct { + *client.Client +} +type MockInput struct{} +type MockOutput struct { + States []*MockState +} +type MockState struct { + State *string +} + +func (c *mockClient) MockRequest(input *MockInput) (*request.Request, *MockOutput) { + op := &request.Operation{ + Name: "Mock", + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MockInput{} + } + + output := &MockOutput{} + req := c.NewRequest(op, input, output) + req.Data = output + return req, output +} + +var mockAcceptors = []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "States[].State", + Expected: "stopping", + }, +} + +func TestWaiter(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 1 + States: []*MockState{ + {State: aws.String("running")}, + {State: 
aws.String("pending")}, + }, + }, + { // Request 1 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: mockAcceptors, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterFailure(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 1 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 1 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("stopping")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: mockAcceptors, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait().(awserr.Error) + assert.Error(t, err) + assert.Equal(t, "ResourceNotReady", err.Code()) + assert.Equal(t, "failed waiting for successful resource state", err.Message()) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go new file mode 100644 index 0000000000000..2099e40491d31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -0,0 +1,1481 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatch provides a client for Amazon CloudWatch. +package cloudwatch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opDeleteAlarms = "DeleteAlarms" + +// DeleteAlarmsRequest generates a request for the DeleteAlarms operation. +func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { + op := &request.Operation{ + Name: opDeleteAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAlarmsOutput{} + req.Data = output + return +} + +// Deletes all specified alarms. In the event of an error, no alarms are deleted. 
+func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAlarmHistory = "DescribeAlarmHistory" + +// DescribeAlarmHistoryRequest generates a request for the DescribeAlarmHistory operation. +func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { + op := &request.Operation{ + Name: opDescribeAlarmHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmHistoryOutput{} + req.Data = output + return +} + +// Retrieves history for the specified alarm. Filter alarms by date range or +// item type. If an alarm name is not specified, Amazon CloudWatch returns histories +// for all of the owner's alarms. +func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(p *DescribeAlarmHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmHistoryOutput), lastPage) + }) +} + +const opDescribeAlarms = "DescribeAlarms" + +// DescribeAlarmsRequest generates a request for the DescribeAlarms operation. +func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { + op := &request.Operation{ + Name: opDescribeAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsOutput{} + req.Data = output + return +} + +// Retrieves alarms with the specified names. If no name is specified, all alarms +// for the user are returned. Alarms can be retrieved by using only a prefix +// for the alarm name, the alarm state, or a prefix for any action. +func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p *DescribeAlarmsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmsOutput), lastPage) + }) +} + +const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" + +// DescribeAlarmsForMetricRequest generates a request for the DescribeAlarmsForMetric operation. 
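+//
+// Illustrative sketch (not part of the generated SDK): the *Request form
+// returns the request object without sending it, so callers can inspect or
+// modify it before calling Send. svc and input are assumed to exist.
+//
+//    req, out := svc.DescribeAlarmsForMetricRequest(input)
+//    // optionally adjust req (e.g. add handlers) before sending
+//    if err := req.Send(); err != nil {
+//        // handle error
+//    }
+//    _ = out.MetricAlarms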
+func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { + op := &request.Operation{ + Name: opDescribeAlarmsForMetric, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlarmsForMetricInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsForMetricOutput{} + req.Data = output + return +} + +// Retrieves all alarms for a single metric. Specify a statistic, period, or +// unit to filter the set of alarms further. +func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + err := req.Send() + return out, err +} + +const opDisableAlarmActions = "DisableAlarmActions" + +// DisableAlarmActionsRequest generates a request for the DisableAlarmActions operation. +func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) { + op := &request.Operation{ + Name: opDisableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableAlarmActionsOutput{} + req.Data = output + return +} + +// Disables actions for the specified alarms. When an alarm's actions are disabled +// the alarm's state may change, but none of the alarm's actions will execute. +func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*DisableAlarmActionsOutput, error) { + req, out := c.DisableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opEnableAlarmActions = "EnableAlarmActions" + +// EnableAlarmActionsRequest generates a request for the EnableAlarmActions operation. +func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) { + op := &request.Operation{ + Name: opEnableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableAlarmActionsOutput{} + req.Data = output + return +} + +// Enables actions for the specified alarms. +func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*EnableAlarmActionsOutput, error) { + req, out := c.EnableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opGetMetricStatistics = "GetMetricStatistics" + +// GetMetricStatisticsRequest generates a request for the GetMetricStatistics operation. +func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) { + op := &request.Operation{ + Name: opGetMetricStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetMetricStatisticsOutput{} + req.Data = output + return +} + +// Gets statistics for the specified metric. +// +// The maximum number of data points returned from a single GetMetricStatistics +// request is 1,440, wereas the maximum number of data points that can be queried +// is 50,850. If you make a request that generates more than 1,440 data points, +// Amazon CloudWatch returns an error. 
In such a case, you can alter the request +// by narrowing the specified time range or increasing the specified period. +// Alternatively, you can make multiple requests across adjacent time ranges. +// +// Amazon CloudWatch aggregates data points based on the length of the period +// that you specify. For example, if you request statistics with a one-minute +// granularity, Amazon CloudWatch aggregates data points with time stamps that +// fall within the same one-minute period. In such a case, the data points queried +// can greatly outnumber the data points returned. +// +// The following examples show various statistics allowed by the data point +// query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances +// with detailed (one-minute) monitoring enabled: +// +// Statistics for up to 400 instances for a span of one hour Statistics for +// up to 35 instances over a span of 24 hours Statistics for up to 2 instances +// over a span of 2 weeks For information about the namespace, metric names, +// and dimensions that other Amazon Web Services products use to send metrics +// to Cloudwatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions +// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. +func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) { + req, out := c.GetMetricStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListMetrics = "ListMetrics" + +// ListMetricsRequest generates a request for the ListMetrics operation. +func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { + op := &request.Operation{ + Name: opListMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMetricsOutput{} + req.Data = output + return +} + +// Returns a list of valid metrics stored for the AWS account owner. Returned +// metrics can be used with GetMetricStatistics to obtain statistical data for +// a given metric. +func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMetricsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMetricsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMetricsOutput), lastPage) + }) +} + +const opPutMetricAlarm = "PutMetricAlarm" + +// PutMetricAlarmRequest generates a request for the PutMetricAlarm operation. +func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) { + op := &request.Operation{ + Name: opPutMetricAlarm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricAlarmInput{} + } + + req = c.newRequest(op, input, output) + output = &PutMetricAlarmOutput{} + req.Data = output + return +} + +// Creates or updates an alarm and associates it with the specified Amazon CloudWatch +// metric. 
Optionally, this operation can associate one or more Amazon Simple +// Notification Service resources with the alarm. +// +// When this operation creates an alarm, the alarm state is immediately set +// to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. +// Any actions associated with the StateValue is then executed. +func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarmOutput, error) { + req, out := c.PutMetricAlarmRequest(input) + err := req.Send() + return out, err +} + +const opPutMetricData = "PutMetricData" + +// PutMetricDataRequest generates a request for the PutMetricData operation. +func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { + op := &request.Operation{ + Name: opPutMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricDataInput{} + } + + req = c.newRequest(op, input, output) + output = &PutMetricDataOutput{} + req.Data = output + return +} + +// Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch associates +// the data points with the specified metric. If the specified metric does not +// exist, Amazon CloudWatch creates the metric. It can take up to fifteen minutes +// for a new metric to appear in calls to the ListMetrics action. +// +// The size of a PutMetricData request is limited to 8 KB for HTTP GET requests +// and 40 KB for HTTP POST requests. +// +// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch +// truncates values with very large exponents. Values with base-10 exponents +// greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 +// exponents less than -130 (1 x 10^-130) are also truncated. Data that is +// timestamped 24 hours or more in the past may take in excess of 48 hours to +// become available from submission time using GetMetricStatistics. +func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + err := req.Send() + return out, err +} + +const opSetAlarmState = "SetAlarmState" + +// SetAlarmStateRequest generates a request for the SetAlarmState operation. +func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) { + op := &request.Operation{ + Name: opSetAlarmState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetAlarmStateInput{} + } + + req = c.newRequest(op, input, output) + output = &SetAlarmStateOutput{} + req.Data = output + return +} + +// Temporarily sets the state of an alarm. When the updated StateValue differs +// from the previous value, the action configured for the appropriate state +// is invoked. This is not a permanent change. The next periodic alarm check +// (in about a minute) will set the alarm to its actual state. +func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) { + req, out := c.SetAlarmStateRequest(input) + err := req.Send() + return out, err +} + +// The AlarmHistoryItem data type contains descriptive information about the +// history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch +// returns this data type as part of the DescribeAlarmHistoryResult data type. +type AlarmHistoryItem struct { + // The descriptive name for the alarm. + AlarmName *string `min:"1" type:"string"` + + // Machine-readable data about the alarm in JSON format. 
+ HistoryData *string `min:"1" type:"string"` + + // The type of alarm history item. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // A human-readable summary of the alarm history. + HistorySummary *string `min:"1" type:"string"` + + // The time stamp for the alarm history item. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataAlarmHistoryItem `json:"-" xml:"-"` +} + +type metadataAlarmHistoryItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AlarmHistoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlarmHistoryItem) GoString() string { + return s.String() +} + +// The Datapoint data type encapsulates the statistical data that Amazon CloudWatch +// computes from metric data. +type Datapoint struct { + // The average of metric values that correspond to the datapoint. + Average *float64 `type:"double"` + + // The maximum of the metric value used for the datapoint. + Maximum *float64 `type:"double"` + + // The minimum metric value used for the datapoint. + Minimum *float64 `type:"double"` + + // The number of metric values that contributed to the aggregate value of this + // datapoint. + SampleCount *float64 `type:"double"` + + // The sum of metric values used for the datapoint. + Sum *float64 `type:"double"` + + // The time stamp used for the datapoint. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The standard unit used for the datapoint. + Unit *string `type:"string" enum:"StandardUnit"` + + metadataDatapoint `json:"-" xml:"-"` +} + +type metadataDatapoint struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datapoint) GoString() string { + return s.String() +} + +type DeleteAlarmsInput struct { + // A list of alarms to be deleted. 
+ AlarmNames []*string `type:"list" required:"true"` + + metadataDeleteAlarmsInput `json:"-" xml:"-"` +} + +type metadataDeleteAlarmsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsInput) GoString() string { + return s.String() +} + +type DeleteAlarmsOutput struct { + metadataDeleteAlarmsOutput `json:"-" xml:"-"` +} + +type metadataDeleteAlarmsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsOutput) GoString() string { + return s.String() +} + +type DescribeAlarmHistoryInput struct { + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // The ending date to retrieve alarm history. + EndDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The type of alarm histories to retrieve. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // The maximum number of alarm history records to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The starting date to retrieve alarm history. + StartDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataDescribeAlarmHistoryInput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmHistoryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmHistory action. +type DescribeAlarmHistoryOutput struct { + // A list of alarm histories in JSON format. + AlarmHistoryItems []*AlarmHistoryItem `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` + + metadataDescribeAlarmHistoryOutput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmHistoryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsForMetricInput struct { + // The list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // The statistic for the metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The unit for the metric. 
+ Unit *string `type:"string" enum:"StandardUnit"` + + metadataDescribeAlarmsForMetricInput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmsForMetricInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmsForMetric action. +type DescribeAlarmsForMetricOutput struct { + // A list of information for each alarm with the specified metric. + MetricAlarms []*MetricAlarm `type:"list"` + + metadataDescribeAlarmsForMetricOutput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmsForMetricOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsInput struct { + // The action name prefix. + ActionPrefix *string `min:"1" type:"string"` + + // The alarm name prefix. AlarmNames cannot be specified if this parameter is + // specified. + AlarmNamePrefix *string `min:"1" type:"string"` + + // A list of alarm names to retrieve information for. + AlarmNames []*string `type:"list"` + + // The maximum number of alarm descriptions to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The state value to be used in matching alarms. + StateValue *string `type:"string" enum:"StateValue"` + + metadataDescribeAlarmsInput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarms action. +type DescribeAlarmsOutput struct { + // A list of information for the specified alarms. + MetricAlarms []*MetricAlarm `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` + + metadataDescribeAlarmsOutput `json:"-" xml:"-"` +} + +type metadataDescribeAlarmsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// The Dimension data type further expands on the identity of a metric using +// a Name, Value pair. +// +// For examples that use one or more dimensions, see PutMetricData. +type Dimension struct { + // The name of the dimension. 
+ Name *string `min:"1" type:"string" required:"true"` + + // The value representing the dimension measurement + Value *string `min:"1" type:"string" required:"true"` + + metadataDimension `json:"-" xml:"-"` +} + +type metadataDimension struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dimension) GoString() string { + return s.String() +} + +// The DimensionFilter data type is used to filter ListMetrics results. +type DimensionFilter struct { + // The dimension name to be matched. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension to be matched. + Value *string `min:"1" type:"string"` + + metadataDimensionFilter `json:"-" xml:"-"` +} + +type metadataDimensionFilter struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DimensionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DimensionFilter) GoString() string { + return s.String() +} + +type DisableAlarmActionsInput struct { + // The names of the alarms to disable actions for. + AlarmNames []*string `type:"list" required:"true"` + + metadataDisableAlarmActionsInput `json:"-" xml:"-"` +} + +type metadataDisableAlarmActionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsInput) GoString() string { + return s.String() +} + +type DisableAlarmActionsOutput struct { + metadataDisableAlarmActionsOutput `json:"-" xml:"-"` +} + +type metadataDisableAlarmActionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsOutput) GoString() string { + return s.String() +} + +type EnableAlarmActionsInput struct { + // The names of the alarms to enable actions for. + AlarmNames []*string `type:"list" required:"true"` + + metadataEnableAlarmActionsInput `json:"-" xml:"-"` +} + +type metadataEnableAlarmActionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsInput) GoString() string { + return s.String() +} + +type EnableAlarmActionsOutput struct { + metadataEnableAlarmActionsOutput `json:"-" xml:"-"` +} + +type metadataEnableAlarmActionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsOutput) GoString() string { + return s.String() +} + +type GetMetricStatisticsInput struct { + // A list of dimensions describing qualities of the metric. + Dimensions []*Dimension `type:"list"` + + // The time stamp to use for determining the last datapoint to return. The value + // specified is exclusive; results will include datapoints up to the time stamp + // specified. 
+ EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, with or without spaces. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric, with or without spaces. + Namespace *string `min:"1" type:"string" required:"true"` + + // The granularity, in seconds, of the returned datapoints. Period must be at + // least 60 seconds and must be a multiple of 60. The default value is 60. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The time stamp to use for determining the first datapoint to return. The + // value specified is inclusive; results include datapoints with the time stamp + // specified. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The metric statistics to return. For information about specific statistics + // returned by GetMetricStatistics, go to Statistics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) + // in the Amazon CloudWatch Developer Guide. + // + // Valid Values: Average | Sum | SampleCount | Maximum | Minimum + Statistics []*string `min:"1" type:"list" required:"true"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + metadataGetMetricStatisticsInput `json:"-" xml:"-"` +} + +type metadataGetMetricStatisticsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetMetricStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsInput) GoString() string { + return s.String() +} + +// The output for the GetMetricStatistics action. +type GetMetricStatisticsOutput struct { + // The datapoints for the specified metric. + Datapoints []*Datapoint `type:"list"` + + // A label describing the specified metric. + Label *string `type:"string"` + + metadataGetMetricStatisticsOutput `json:"-" xml:"-"` +} + +type metadataGetMetricStatisticsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetMetricStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsOutput) GoString() string { + return s.String() +} + +type ListMetricsInput struct { + // A list of dimensions to filter against. + Dimensions []*DimensionFilter `type:"list"` + + // The name of the metric to filter against. + MetricName *string `min:"1" type:"string"` + + // The namespace to filter against. + Namespace *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + metadataListMetricsInput `json:"-" xml:"-"` +} + +type metadataListMetricsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ListMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsInput) GoString() string { + return s.String() +} + +// The output for the ListMetrics action. +type ListMetricsOutput struct { + // A list of metrics used to generate statistics for an AWS account. + Metrics []*Metric `type:"list"` + + // A string that marks the start of the next batch of returned results. 
+ NextToken *string `type:"string"` + + metadataListMetricsOutput `json:"-" xml:"-"` +} + +type metadataListMetricsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ListMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsOutput) GoString() string { + return s.String() +} + +// The Metric data type contains information about a specific metric. If you +// call ListMetrics, Amazon CloudWatch returns information contained by this +// data type. +// +// The example in the Examples section publishes two metrics named buffers +// and latency. Both metrics are in the examples namespace. Both metrics have +// two dimensions, InstanceID and InstanceType. +type Metric struct { + // A list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string"` + + metadataMetric `json:"-" xml:"-"` +} + +type metadataMetric struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Metric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metric) GoString() string { + return s.String() +} + +// The MetricAlarm data type represents an alarm. You can use PutMetricAlarm +// to create or update an alarm. +type MetricAlarm struct { + // Indicates whether actions should be executed during any changes to the alarm's + // state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only actions supported are publishing to an Amazon + // SNS topic and triggering an Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmArn *string `min:"1" type:"string"` + + // The time stamp of the last update to the alarm configuration. Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // The list of dimensions associated with the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). 
Currently the only actions supported are publishing to an Amazon + // SNS topic or triggering an Auto Scaling policy. + // + // The current WSDL lists this attribute as UnknownActions. + InsufficientDataActions []*string `type:"list"` + + // The name of the alarm's metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of alarm's associated metric. + Namespace *string `min:"1" type:"string"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only actions supported are publishing to an Amazon SNS + // topic and triggering an Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // A human-readable explanation for the alarm's state. + StateReason *string `type:"string"` + + // An explanation for the alarm's state in machine-readable JSON format + StateReasonData *string `type:"string"` + + // The time stamp of the last update to the alarm's state. Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + StateUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state value for the alarm. + StateValue *string `type:"string" enum:"StateValue"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double"` + + // The unit of the alarm's associated metric. + Unit *string `type:"string" enum:"StandardUnit"` + + metadataMetricAlarm `json:"-" xml:"-"` +} + +type metadataMetricAlarm struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MetricAlarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricAlarm) GoString() string { + return s.String() +} + +// The MetricDatum data type encapsulates the information sent with PutMetricData +// to either create a new metric or add new values to be aggregated into an +// existing metric. +type MetricDatum struct { + // A list of dimensions associated with the metric. Note, when using the Dimensions + // value in a query, you need to append .member.N to it (e.g., Dimensions.member.N). + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // A set of statistical values describing the metric. + StatisticValues *StatisticSet `type:"structure"` + + // The time stamp used for the metric. If not specified, the default value is + // set to the time the metric data was received. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. 
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unit of the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + // The value for the metric. + // + // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch + // truncates values with very large exponents. Values with base-10 exponents + // greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 + // exponents less than -130 (1 x 10^-130) are also truncated. + Value *float64 `type:"double"` + + metadataMetricDatum `json:"-" xml:"-"` +} + +type metadataMetricDatum struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDatum) GoString() string { + return s.String() +} + +type PutMetricAlarmInput struct { + // Indicates whether or not actions should be executed during any changes to + // the alarm's state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account + AlarmName *string `min:"1" type:"string" required:"true"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The dimensions for the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + InsufficientDataActions []*string `type:"list"` + + // The name for the alarm's associated metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace for the alarm's associated metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only action supported is publishing to an Amazon SNS + // topic or an Amazon Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the specified statistic is applied. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" required:"true" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double" required:"true"` + + // The unit for the alarm's associated metric. 
+ Unit *string `type:"string" enum:"StandardUnit"` + + metadataPutMetricAlarmInput `json:"-" xml:"-"` +} + +type metadataPutMetricAlarmInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PutMetricAlarmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmInput) GoString() string { + return s.String() +} + +type PutMetricAlarmOutput struct { + metadataPutMetricAlarmOutput `json:"-" xml:"-"` +} + +type metadataPutMetricAlarmOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PutMetricAlarmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmOutput) GoString() string { + return s.String() +} + +type PutMetricDataInput struct { + // A list of data describing the metric. + MetricData []*MetricDatum `type:"list" required:"true"` + + // The namespace for the metric data. + Namespace *string `min:"1" type:"string" required:"true"` + + metadataPutMetricDataInput `json:"-" xml:"-"` +} + +type metadataPutMetricDataInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PutMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataInput) GoString() string { + return s.String() +} + +type PutMetricDataOutput struct { + metadataPutMetricDataOutput `json:"-" xml:"-"` +} + +type metadataPutMetricDataOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PutMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataOutput) GoString() string { + return s.String() +} + +type SetAlarmStateInput struct { + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account. The maximum length is 255 characters. + AlarmName *string `min:"1" type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in human-readable + // text format) + StateReason *string `type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in machine-readable + // JSON format) + StateReasonData *string `type:"string"` + + // The value of the state. + StateValue *string `type:"string" required:"true" enum:"StateValue"` + + metadataSetAlarmStateInput `json:"-" xml:"-"` +} + +type metadataSetAlarmStateInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SetAlarmStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateInput) GoString() string { + return s.String() +} + +type SetAlarmStateOutput struct { + metadataSetAlarmStateOutput `json:"-" xml:"-"` +} + +type metadataSetAlarmStateOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SetAlarmStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateOutput) GoString() string { + return s.String() +} + +// The StatisticSet data type describes the StatisticValues component of MetricDatum, +// and represents a set of statistics that describes a specific metric. 
+type StatisticSet struct { + // The maximum value of the sample set. + Maximum *float64 `type:"double" required:"true"` + + // The minimum value of the sample set. + Minimum *float64 `type:"double" required:"true"` + + // The number of samples used for the statistic set. + SampleCount *float64 `type:"double" required:"true"` + + // The sum of values for the sample set. + Sum *float64 `type:"double" required:"true"` + + metadataStatisticSet `json:"-" xml:"-"` +} + +type metadataStatisticSet struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatisticSet) GoString() string { + return s.String() +} + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // @enum HistoryItemType + HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" + // @enum HistoryItemType + HistoryItemTypeStateUpdate = "StateUpdate" + // @enum HistoryItemType + HistoryItemTypeAction = "Action" +) + +const ( + // @enum StandardUnit + StandardUnitSeconds = "Seconds" + // @enum StandardUnit + StandardUnitMicroseconds = "Microseconds" + // @enum StandardUnit + StandardUnitMilliseconds = "Milliseconds" + // @enum StandardUnit + StandardUnitBytes = "Bytes" + // @enum StandardUnit + StandardUnitKilobytes = "Kilobytes" + // @enum StandardUnit + StandardUnitMegabytes = "Megabytes" + // @enum StandardUnit + StandardUnitGigabytes = "Gigabytes" + // @enum StandardUnit + StandardUnitTerabytes = "Terabytes" + // @enum StandardUnit + StandardUnitBits = "Bits" + // @enum StandardUnit + StandardUnitKilobits = "Kilobits" + // @enum StandardUnit + StandardUnitMegabits = "Megabits" + // @enum StandardUnit + StandardUnitGigabits = "Gigabits" + // @enum StandardUnit + StandardUnitTerabits = "Terabits" + // @enum StandardUnit + StandardUnitPercent = "Percent" + // @enum StandardUnit + StandardUnitCount = "Count" + // @enum StandardUnit + StandardUnitBytesSecond = "Bytes/Second" + // @enum StandardUnit + StandardUnitKilobytesSecond = "Kilobytes/Second" + // @enum StandardUnit + StandardUnitMegabytesSecond = "Megabytes/Second" + // @enum StandardUnit + StandardUnitGigabytesSecond = "Gigabytes/Second" + // @enum StandardUnit + StandardUnitTerabytesSecond = "Terabytes/Second" + // @enum StandardUnit + StandardUnitBitsSecond = "Bits/Second" + // @enum StandardUnit + StandardUnitKilobitsSecond = "Kilobits/Second" + // @enum StandardUnit + StandardUnitMegabitsSecond = "Megabits/Second" + // @enum StandardUnit + StandardUnitGigabitsSecond = "Gigabits/Second" + // @enum StandardUnit + StandardUnitTerabitsSecond = "Terabits/Second" + // @enum StandardUnit + StandardUnitCountSecond = "Count/Second" + // @enum StandardUnit + StandardUnitNone = "None" +) + +const ( + // @enum StateValue + StateValueOk = "OK" + // @enum StateValue + StateValueAlarm = "ALARM" + // @enum StateValue + StateValueInsufficientData = "INSUFFICIENT_DATA" +) + +const ( + // @enum Statistic + StatisticSampleCount = "SampleCount" + // @enum Statistic + StatisticAverage = "Average" + // @enum Statistic + StatisticSum = "Sum" + // 
@enum Statistic + StatisticMinimum = "Minimum" + // @enum Statistic + StatisticMaximum = "Maximum" +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go new file mode 100644 index 0000000000000..140dc9e983261 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go @@ -0,0 +1,64 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchiface provides an interface for the Amazon CloudWatch. +package cloudwatchiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +// CloudWatchAPI is the interface type for cloudwatch.CloudWatch. +type CloudWatchAPI interface { + DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*request.Request, *cloudwatch.DeleteAlarmsOutput) + + DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error) + + DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*request.Request, *cloudwatch.DescribeAlarmHistoryOutput) + + DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error) + + DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error + + DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*request.Request, *cloudwatch.DescribeAlarmsOutput) + + DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error) + + DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error + + DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*request.Request, *cloudwatch.DescribeAlarmsForMetricOutput) + + DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error) + + DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*request.Request, *cloudwatch.DisableAlarmActionsOutput) + + DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error) + + EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*request.Request, *cloudwatch.EnableAlarmActionsOutput) + + EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error) + + GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*request.Request, *cloudwatch.GetMetricStatisticsOutput) + + GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + + ListMetricsRequest(*cloudwatch.ListMetricsInput) (*request.Request, *cloudwatch.ListMetricsOutput) + + ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) + + ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error + + PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*request.Request, *cloudwatch.PutMetricAlarmOutput) + + PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error) + + PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*request.Request, *cloudwatch.PutMetricDataOutput) + + PutMetricData(*cloudwatch.PutMetricDataInput) (*cloudwatch.PutMetricDataOutput, error) + + SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) (*request.Request, *cloudwatch.SetAlarmStateOutput) + + 
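// An illustrative sketch, not part of the vendored SDK files in this change: it shows the
// intended use of the CloudWatchAPI interface above. Code that depends on the interface
// rather than on *cloudwatch.CloudWatch can follow NextToken pagination through
// ListMetricsPages and can be exercised in tests with a stub. The SDK identifiers are the
// ones declared in this change; the package, function, and type names here are hypothetical.
package cloudwatchsketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
	"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
)

// countMetrics counts the metrics in a namespace, letting the SDK follow
// NextToken pagination via ListMetricsPages.
func countMetrics(svc cloudwatchiface.CloudWatchAPI, namespace string) (int, error) {
	total := 0
	err := svc.ListMetricsPages(&cloudwatch.ListMetricsInput{
		Namespace: aws.String(namespace),
	}, func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
		total += len(page.Metrics)
		return true // keep iterating until the last page has been delivered
	})
	return total, err
}

// stubCloudWatch satisfies CloudWatchAPI in a test by embedding the interface and
// overriding only the method under test; calling any other method would panic.
type stubCloudWatch struct {
	cloudwatchiface.CloudWatchAPI
	pages []*cloudwatch.ListMetricsOutput
}

func (s stubCloudWatch) ListMetricsPages(in *cloudwatch.ListMetricsInput, fn func(*cloudwatch.ListMetricsOutput, bool) bool) error {
	for i, page := range s.pages {
		if !fn(page, i == len(s.pages)-1) {
			break
		}
	}
	return nil
}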
SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error) +} + +var _ CloudWatchAPI = (*cloudwatch.CloudWatch)(nil) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go new file mode 100644 index 0000000000000..07b010852b928 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatch_DeleteAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DeleteAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmHistory() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmHistoryInput{ + AlarmName: aws.String("AlarmName"), + EndDate: aws.Time(time.Now()), + HistoryItemType: aws.String("HistoryItemType"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartDate: aws.Time(time.Now()), + } + resp, err := svc.DescribeAlarmHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsInput{ + ActionPrefix: aws.String("ActionPrefix"), + AlarmNamePrefix: aws.String("AlarmNamePrefix"), + AlarmNames: []*string{ + aws.String("AlarmName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StateValue: aws.String("StateValue"), + } + resp, err := svc.DescribeAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmsForMetric() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsForMetricInput{ + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Period: aws.Int64(1), + Statistic: aws.String("Statistic"), + Unit: aws.String("StandardUnit"), + } + resp, err := svc.DescribeAlarmsForMetric(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_DisableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DisableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DisableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_EnableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.EnableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.EnableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_GetMetricStatistics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.GetMetricStatisticsInput{ + EndTime: aws.Time(time.Now()), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + StartTime: aws.Time(time.Now()), // Required + Statistics: []*string{ // Required + aws.String("Statistic"), // Required + // More values... + }, + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.GetMetricStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_ListMetrics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.ListMetricsInput{ + Dimensions: []*cloudwatch.DimensionFilter{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), + }, + // More values... + }, + MetricName: aws.String("MetricName"), + Namespace: aws.String("Namespace"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListMetrics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricAlarm() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String("AlarmName"), // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + EvaluationPeriods: aws.Int64(1), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + Statistic: aws.String("Statistic"), // Required + Threshold: aws.Float64(1.0), // Required + ActionsEnabled: aws.Bool(true), + AlarmActions: []*string{ + aws.String("ResourceName"), // Required + // More values... 
+ }, + AlarmDescription: aws.String("AlarmDescription"), + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + InsufficientDataActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + OKActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.PutMetricAlarm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricData() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricDataInput{ + MetricData: []*cloudwatch.MetricDatum{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + StatisticValues: &cloudwatch.StatisticSet{ + Maximum: aws.Float64(1.0), // Required + Minimum: aws.Float64(1.0), // Required + SampleCount: aws.Float64(1.0), // Required + Sum: aws.Float64(1.0), // Required + }, + Timestamp: aws.Time(time.Now()), + Unit: aws.String("StandardUnit"), + Value: aws.Float64(1.0), + }, + // More values... + }, + Namespace: aws.String("Namespace"), // Required + } + resp, err := svc.PutMetricData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_SetAlarmState() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String("AlarmName"), // Required + StateReason: aws.String("StateReason"), // Required + StateValue: aws.String("StateValue"), // Required + StateReasonData: aws.String("StateReasonData"), + } + resp, err := svc.SetAlarmState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go new file mode 100644 index 0000000000000..e6f8506846050 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -0,0 +1,125 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon CloudWatch API Reference. This guide provides detailed +// information about Amazon CloudWatch actions, data types, parameters, and +// errors. For detailed information about Amazon CloudWatch features and their +// associated API calls, go to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). 
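// An illustrative sketch, not part of the vendored SDK files in this change: the generated
// examples above repeatedly note that err can be cast to awserr.Error to get the Code and
// Message from an error, but never show the cast itself. This is the usual pattern; the
// package and function names are hypothetical, while awserr is the SDK's error package.
package cloudwatchsketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// reportAWSError prints the AWS error code and message when the error came from the
// service, and falls back to the plain error string otherwise.
func reportAWSError(err error) {
	if err == nil {
		return
	}
	if awsErr, ok := err.(awserr.Error); ok {
		fmt.Println("code:", awsErr.Code(), "message:", awsErr.Message())
		return
	}
	fmt.Println(err.Error())
}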
+// +// Amazon CloudWatch is a web service that enables you to publish, monitor, +// and manage various metrics, as well as configure alarm actions based on data +// from metrics. For more information about this product go to http://aws.amazon.com/cloudwatch +// (http://aws.amazon.com/cloudwatch). +// +// For information about the namespace, metric names, and dimensions that +// other Amazon Web Services products use to send metrics to Cloudwatch, go +// to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. +// +// Use the following links to get started using the Amazon CloudWatch API Reference: +// +// Actions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Operations.html): +// An alphabetical list of all Amazon CloudWatch actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Types.html): +// An alphabetical list of all Amazon CloudWatch data types. Common Parameters +// (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonParameters.html): +// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonErrors.html): +// Client and server errors that all actions can return. Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized +// regions and endpoints for all AWS products. WSDL Location (http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl): +// http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl In addition +// to using the Amazon CloudWatch API, you can also use the following SDKs and +// third-party libraries to access Amazon CloudWatch programmatically. +// +// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/) +// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/) +// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/) +// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/) +// Developers in the AWS developer community also provide their own libraries, +// which you can find at the following AWS developer centers: +// +// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer +// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/) +// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET +// Developer Center (http://aws.amazon.com/net/) +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudWatch struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "monitoring" + +// New creates a new instance of the CloudWatch client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudWatch client from just a session. 
+// svc := cloudwatch.New(mySession) +// +// // Create a CloudWatch client with additional configuration +// svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatch { + svc := &CloudWatch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-08-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(query.Build) + svc.Handlers.Unmarshal.PushBack(query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatch operation and runs any +// custom request initialization. +func (c *CloudWatch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/api.go new file mode 100644 index 0000000000000..337392a86f298 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -0,0 +1,24756 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2 provides a client for Amazon Elastic Compute Cloud. +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" + +// AcceptVpcPeeringConnectionRequest generates a request for the AcceptVpcPeeringConnection operation. +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opAcceptVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &AcceptVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Accept a VPC peering connection request. To accept a request, the VPC peering +// connection must be in the pending-acceptance state, and you must be the owner +// of the peer VPC. Use the DescribeVpcPeeringConnections request to view your +// outstanding VPC peering connection requests. +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opAllocateAddress = "AllocateAddress" + +// AllocateAddressRequest generates a request for the AllocateAddress operation. 
+func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { + op := &request.Operation{ + Name: opAllocateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateAddressOutput{} + req.Data = output + return +} + +// Acquires an Elastic IP address. +// +// An Elastic IP address is for use either in the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" + +// AssignPrivateIpAddressesRequest generates a request for the AssignPrivateIpAddresses operation. +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opAssignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &AssignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Assigns one or more secondary private IP addresses to the specified network +// interface. You can specify one or more specific secondary IP addresses, or +// you can specify the number of secondary IP addresses to be automatically +// assigned within the subnet's CIDR block range. The number of secondary IP +// addresses that you can assign to an instance varies by instance type. For +// information about instance types, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// Elastic IP addresses, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// AssignPrivateIpAddresses is available only in EC2-VPC. +func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opAssociateAddress = "AssociateAddress" + +// AssociateAddressRequest generates a request for the AssociateAddress operation. +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { + op := &request.Operation{ + Name: opAssociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateAddressOutput{} + req.Data = output + return +} + +// Associates an Elastic IP address with an instance or a network interface. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address +// is already associated with a different instance, it is disassociated from +// that instance and associated with the specified instance. +// +// [VPC in an EC2-Classic account] If you don't specify a private IP address, +// the Elastic IP address is associated with the primary IP address. If the +// Elastic IP address is already associated with a different instance or a network +// interface, you get an error unless you allow reassociation. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAssociateDhcpOptions = "AssociateDhcpOptions" + +// AssociateDhcpOptionsRequest generates a request for the AssociateDhcpOptions operation. +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opAssociateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateDhcpOptionsOutput{} + req.Data = output + return +} + +// Associates a set of DHCP options (that you've previously created) with the +// specified VPC, or associates no DHCP options with the VPC. +// +// After you associate the options with the VPC, any existing instances and +// all new instances that you launch in that VPC use the options. You don't +// need to restart or relaunch the instances. They automatically pick up the +// changes within a few hours, depending on how frequently the instance renews +// its DHCP lease. You can explicitly renew the lease using the operating system +// on the instance. +// +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opAssociateRouteTable = "AssociateRouteTable" + +// AssociateRouteTableRequest generates a request for the AssociateRouteTable operation. +func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateRouteTableOutput{} + req.Data = output + return +} + +// Associates a subnet with a route table. The subnet and route table must be +// in the same VPC. This association causes traffic originating from the subnet +// to be routed according to the routes in the route table. The action returns +// an association ID, which you need in order to disassociate the route table +// from the subnet later. A route table can be associated with multiple subnets. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opAttachClassicLinkVpc = "AttachClassicLinkVpc" + +// AttachClassicLinkVpcRequest generates a request for the AttachClassicLinkVpc operation. +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opAttachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or +// more of the VPC's security groups. You cannot link an EC2-Classic instance +// to more than one VPC at a time. You can only link an instance that's in the +// running state. An instance is automatically unlinked from a VPC when it's +// stopped - you can link it to the VPC again when you restart it. +// +// After you've linked an instance, you cannot change the VPC security groups +// that are associated with it. To change the security groups, you must first +// unlink the instance, and then link it again. +// +// Linking your instance to a VPC is sometimes referred to as attaching your +// instance. +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opAttachInternetGateway = "AttachInternetGateway" + +// AttachInternetGatewayRequest generates a request for the AttachInternetGateway operation. +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { + op := &request.Operation{ + Name: opAttachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachInternetGatewayOutput{} + req.Data = output + return +} + +// Attaches an Internet gateway to a VPC, enabling connectivity between the +// Internet and the VPC. For more information about your VPC and Internet gateway, +// see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAttachNetworkInterface = "AttachNetworkInterface" + +// AttachNetworkInterfaceRequest generates a request for the AttachNetworkInterface operation. +func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opAttachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Attaches a network interface to an instance. 
+func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAttachVolume = "AttachVolume" + +// AttachVolumeRequest generates a request for the AttachVolume operation. +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opAttachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Attaches an EBS volume to a running or stopped instance and exposes it to +// the instance with the specified device name. +// +// Encrypted EBS volumes may only be attached to instances that support Amazon +// EBS encryption. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For a list of supported device names, see Attaching an EBS Volume to an +// Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). +// Any device names that aren't reserved for instance store volumes can be used +// for EBS volumes. For more information, see Amazon EC2 Instance Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If a volume has an AWS Marketplace product code: +// +// The volume can be attached only to a stopped instance. AWS Marketplace +// product codes are copied from the volume to the instance. You must be subscribed +// to the product. The instance type and operating system of the instance must +// support the product. For example, you can't detach a volume from a Windows +// instance and attach it to a Linux instance. For an overview of the AWS Marketplace, +// see Introducing AWS Marketplace (https://aws.amazon.com/marketplace/help/200900000). +// +// For more information about EBS volumes, see Attaching Amazon EBS Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAttachVpnGateway = "AttachVpnGateway" + +// AttachVpnGatewayRequest generates a request for the AttachVpnGateway operation. +func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { + op := &request.Operation{ + Name: opAttachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachVpnGatewayOutput{} + req.Data = output + return +} + +// Attaches a virtual private gateway to a VPC. For more information, see Adding +// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" + +// AuthorizeSecurityGroupEgressRequest generates a request for the AuthorizeSecurityGroupEgress operation. +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// Adds one or more egress rules to a security group for use with a VPC. Specifically, +// this action permits instances to send traffic to one or more destination +// CIDR IP address ranges, or to one or more destination security groups for +// the same VPC. +// +// You can have up to 50 rules per security group (covering both ingress and +// egress rules). +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. This action doesn't apply to security groups for use +// in EC2-Classic. For more information, see Security Groups for Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Each rule consists of the protocol (for example, TCP), plus either a CIDR +// range or a source group. For the TCP and UDP protocols, you must also specify +// the destination port or port range. For the ICMP protocol, you must also +// specify the ICMP type and code. You can use -1 for the type or code to mean +// all types or all codes. +// +// Rule changes are propagated to affected instances as quickly as possible. +// However, a small delay might occur. +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" + +// AuthorizeSecurityGroupIngressRequest generates a request for the AuthorizeSecurityGroupIngress operation. +func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds one or more ingress rules to a security group. +// +// EC2-Classic: You can have up to 100 rules per group. +// +// EC2-VPC: You can have up to 50 rules per group (covering both ingress and +// egress rules). +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. 
+// +// [EC2-Classic] This action gives one or more CIDR IP address ranges permission +// to access a security group in your account, or gives one or more security +// groups (called the source groups) permission to access a security group for +// your account. A source group can be for your own AWS account, or another. +// +// [EC2-VPC] This action gives one or more CIDR IP address ranges permission +// to access a security group in your VPC, or gives one or more other security +// groups (called the source groups) permission to access a security group for +// your VPC. The security groups must all be for the same VPC. +func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opBundleInstance = "BundleInstance" + +// BundleInstanceRequest generates a request for the BundleInstance operation. +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { + op := &request.Operation{ + Name: opBundleInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BundleInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &BundleInstanceOutput{} + req.Data = output + return +} + +// Bundles an Amazon instance store-backed Windows instance. +// +// During bundling, only the root device volume (C:\) is bundled. Data on other +// instance store volumes is not preserved. +// +// This action is not applicable for Linux/Unix instances or Windows instances +// that are backed by Amazon EBS. +// +// For more information, see Creating an Instance Store-Backed Windows AMI +// (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html). +func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCancelBundleTask = "CancelBundleTask" + +// CancelBundleTaskRequest generates a request for the CancelBundleTask operation. +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { + op := &request.Operation{ + Name: opCancelBundleTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelBundleTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelBundleTaskOutput{} + req.Data = output + return +} + +// Cancels a bundling operation for an instance store-backed Windows instance. +func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelConversionTask = "CancelConversionTask" + +// CancelConversionTaskRequest generates a request for the CancelConversionTask operation. +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { + op := &request.Operation{ + Name: opCancelConversionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelConversionTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelConversionTaskOutput{} + req.Data = output + return +} + +// Cancels an active conversion task. The task can be the import of an instance +// or volume. 
The action removes all artifacts of the conversion, including +// a partially uploaded volume or instance. If the conversion is complete or +// is in the process of transferring the final disk image, the command fails +// and returns an exception. +// +// For more information, see Using the Command Line Tools to Import Your Virtual +// Machine to Amazon EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a request for the CancelExportTask operation. +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an active export task. The request removes all artifacts of the export, +// including any partially-created Amazon S3 objects. If the export task is +// complete or is in the process of transferring the final disk image, the command +// fails and returns an error. +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelImportTask = "CancelImportTask" + +// CancelImportTaskRequest generates a request for the CancelImportTask operation. +func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { + op := &request.Operation{ + Name: opCancelImportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelImportTaskOutput{} + req.Data = output + return +} + +// Cancels an in-process import virtual machine or import snapshot task. +func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelReservedInstancesListing = "CancelReservedInstancesListing" + +// CancelReservedInstancesListingRequest generates a request for the CancelReservedInstancesListing operation. +func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCancelReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelReservedInstancesListingOutput{} + req.Data = output + return +} + +// Cancels the specified Reserved Instance listing in the Reserved Instance +// Marketplace. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
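A minimal sketch of cancelling a Reserved Instance listing, same assumptions; the listing ID is a placeholder and the input field name is assumed from this revision:

    if _, err := svc.CancelReservedInstancesListing(&ec2.CancelReservedInstancesListingInput{
        ReservedInstancesListingId: aws.String("example-listing-id"), // placeholder listing
    }); err != nil {
        log.Fatal(err)
    }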
+func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotFleetRequests = "CancelSpotFleetRequests" + +// CancelSpotFleetRequestsRequest generates a request for the CancelSpotFleetRequests operation. +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Cancels the specified Spot fleet requests. +// +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot +// instances. You must specify whether the Spot fleet should also terminate +// its Spot instances. If you terminate the instances, the Spot fleet request +// enters the cancelled_terminating state. Otherwise, the Spot fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" + +// CancelSpotInstanceRequestsRequest generates a request for the CancelSpotInstanceRequests operation. +func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Cancels one or more Spot instance requests. Spot instances are instances +// that Amazon EC2 starts on your behalf when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Canceling a Spot instance request does not terminate running Spot instances +// associated with the request. +func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opConfirmProductInstance = "ConfirmProductInstance" + +// ConfirmProductInstanceRequest generates a request for the ConfirmProductInstance operation. 
+func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { + op := &request.Operation{ + Name: opConfirmProductInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmProductInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmProductInstanceOutput{} + req.Data = output + return +} + +// Determines whether a product code is associated with an instance. This action +// can only be used by the owner of the product code. It is useful when a product +// code owner needs to verify whether another user's instance is eligible for +// support. +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a request for the CopyImage operation. +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyImageOutput{} + req.Data = output + return +} + +// Initiates the copy of an AMI from the specified source region to the current +// region. You specify the destination region by using its endpoint when making +// the request. AMIs that use encrypted EBS snapshots cannot be copied with +// this method. +// +// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a request for the CopySnapshot operation. +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon +// S3. You can copy the snapshot within the same region or from one region to +// another. You can use the snapshot to create EBS volumes or Amazon Machine +// Images (AMIs). The snapshot is copied to the regional endpoint that you send +// the HTTP request to. +// +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted +// snapshots remain unencrypted, unless the Encrypted flag is specified during +// the snapshot copy operation. By default, encrypted snapshot copies use the +// default AWS Key Management Service (AWS KMS) customer master key (CMK); however, +// you can specify a non-default CMK with the KmsKeyId parameter. +// +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. 
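A sketch of copying a snapshot into the region the client is configured against, same assumptions; SourceRegion names the region the source snapshot lives in:

    copyOut, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
        SourceRegion:     aws.String("us-east-1"),     // region of the source snapshot
        SourceSnapshotId: aws.String("snap-1a2b3c4d"), // placeholder snapshot
        Description:      aws.String("nightly backup copy"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("new snapshot id:", *copyOut.SnapshotId)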
+func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomerGateway = "CreateCustomerGateway" + +// CreateCustomerGatewayRequest generates a request for the CreateCustomerGateway operation. +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { + op := &request.Operation{ + Name: opCreateCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomerGatewayOutput{} + req.Data = output + return +} + +// Provides information to AWS about your VPN customer gateway device. The customer +// gateway is the appliance at your end of the VPN connection. (The device on +// the AWS side of the VPN connection is the virtual private gateway.) You must +// provide the Internet-routable IP address of the customer gateway's external +// interface. The IP address must be static and may be behind a device performing +// network address translation (NAT). +// +// For devices that use Border Gateway Protocol (BGP), you can also provide +// the device's BGP Autonomous System Number (ASN). You can use an existing +// ASN assigned to your network. If you don't have an ASN already, you can use +// a private ASN (in the 64512 - 65534 range). +// +// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with +// the exception of 7224, which is reserved in the us-east-1 region, and 9059, +// which is reserved in the eu-west-1 region. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You cannot create more than one customer gateway with the same VPN type, +// IP address, and BGP ASN parameter values. If you run an identical request +// more than one time, the first request creates the customer gateway, and subsequent +// requests return information about the existing customer gateway. The subsequent +// requests do not create new customer gateway resources. +func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateDhcpOptions = "CreateDhcpOptions" + +// CreateDhcpOptionsRequest generates a request for the CreateDhcpOptions operation. +func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opCreateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDhcpOptionsOutput{} + req.Data = output + return +} + +// Creates a set of DHCP options for your VPC. After creating the set, you must +// associate it with the VPC, causing all existing and new instances that you +// launch in the VPC to use this set of DHCP options. The following are the +// individual DHCP options you can specify. For more information about the options, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). 
+// +// domain-name-servers - The IP addresses of up to four domain name servers, +// or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. +// If specifying more than one domain name server, specify the IP addresses +// in a single parameter, separated by commas. domain-name - If you're using +// AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS +// in another region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). +// Otherwise, specify a domain name (for example, MyCompany.com). Important: +// Some Linux operating systems accept multiple domain names separated by spaces. +// However, Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set is +// associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. ntp-servers - The IP addresses of up to four +// Network Time Protocol (NTP) servers. netbios-name-servers - The IP addresses +// of up to four NetBIOS name servers. netbios-node-type - The NetBIOS node +// type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast +// are not currently supported). For more information about these node types, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). Your VPC automatically +// starts out with a set of DHCP options that includes only a DNS server that +// we provide (AmazonProvidedDNS). If you create a set of options, and if your +// VPC has an Internet gateway, make sure to set the domain-name-servers option +// either to AmazonProvidedDNS or to a domain name server of your choice. For +// more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opCreateFlowLogs = "CreateFlowLogs" + +// CreateFlowLogsRequest generates a request for the CreateFlowLogs operation. +func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) { + op := &request.Operation{ + Name: opCreateFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateFlowLogsOutput{} + req.Data = output + return +} + +// Creates one or more flow logs to capture IP traffic for a specific network +// interface, subnet, or VPC. Flow logs are delivered to a specified log group +// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, +// a log stream is created in CloudWatch Logs for each network interface in +// the subnet or VPC. Log streams can include information about accepted and +// rejected traffic to a network interface. You can view the data in your log +// streams using Amazon CloudWatch Logs. +// +// In your request, you must also specify an IAM role that has permission to +// publish logs to CloudWatch Logs. +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opCreateImage = "CreateImage" + +// CreateImageRequest generates a request for the CreateImage operation. 
+func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { + op := &request.Operation{ + Name: opCreateImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateImageOutput{} + req.Data = output + return +} + +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that +// is either running or stopped. +// +// If you customized your instance with instance store volumes or EBS volumes +// in addition to the root device volume, the new AMI contains block device +// mapping information for those volumes. When you launch an instance from this +// new AMI, the instance automatically launches with those additional volumes. +// +// For more information, see Creating Amazon EBS-Backed Linux AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceExportTask = "CreateInstanceExportTask" + +// CreateInstanceExportTaskRequest generates a request for the CreateInstanceExportTask operation. +func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { + op := &request.Operation{ + Name: opCreateInstanceExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceExportTaskOutput{} + req.Data = output + return +} + +// Exports a running or stopped instance to an S3 bucket. +// +// For information about the supported operating systems, image formats, and +// known limitations for the types of instances you can export, see Exporting +// EC2 Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ExportingEC2Instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateInternetGateway = "CreateInternetGateway" + +// CreateInternetGatewayRequest generates a request for the CreateInternetGateway operation. +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInternetGatewayOutput{} + req.Data = output + return +} + +// Creates an Internet gateway for use with a VPC. After creating the Internet +// gateway, you attach it to a VPC using AttachInternetGateway. +// +// For more information about your VPC and Internet gateway, see the Amazon +// Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). 
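A sketch of the create-then-attach flow described above, same assumptions and a placeholder VPC ID:

    igw, err := svc.CreateInternetGateway(&ec2.CreateInternetGatewayInput{})
    if err != nil {
        log.Fatal(err)
    }
    if _, err := svc.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
        InternetGatewayId: igw.InternetGateway.InternetGatewayId,
        VpcId:             aws.String("vpc-1a2b3c4d"), // placeholder VPC
    }); err != nil {
        log.Fatal(err)
    }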
+func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeyPair = "CreateKeyPair" + +// CreateKeyPairRequest generates a request for the CreateKeyPair operation. +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { + op := &request.Operation{ + Name: opCreateKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyPairOutput{} + req.Data = output + return +} + +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores +// the public key and displays the private key for you to save to a file. The +// private key is returned as an unencrypted PEM encoded PKCS#8 private key. +// If a key with the specified name already exists, Amazon EC2 returns an error. +// +// You can have up to five thousand key pairs per region. +// +// The key pair returned to you is available only in the region in which you +// create it. To create a key pair that is available in all regions, use ImportKeyPair. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAcl = "CreateNetworkAcl" + +// CreateNetworkAclRequest generates a request for the CreateNetworkAcl operation. +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { + op := &request.Operation{ + Name: opCreateNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkAclOutput{} + req.Data = output + return +} + +// Creates a network ACL in a VPC. Network ACLs provide an optional layer of +// security (in addition to security groups) for the instances in your VPC. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAclEntry = "CreateNetworkAclEntry" + +// CreateNetworkAclEntryRequest generates a request for the CreateNetworkAclEntry operation. +func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opCreateNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkAclEntryOutput{} + req.Data = output + return +} + +// Creates an entry (a rule) in a network ACL with the specified rule number. +// Each network ACL has a set of numbered ingress rules and a separate set of +// numbered egress rules. 
When determining whether a packet should be allowed +// in or out of a subnet associated with the ACL, we process the entries in +// the ACL according to the rule numbers, in ascending order. Each network ACL +// has a set of ingress rules and a separate set of egress rules. +// +// We recommend that you leave room between the rule numbers (for example, +// 100, 110, 120, ...), and not number them one right after the other (for example, +// 101, 102, 103, ...). This makes it easier to add a rule between existing +// ones without having to renumber the rules. +// +// After you add an entry, you can't modify it; you must either replace it, +// or create an entry and delete the old one. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) { + req, out := c.CreateNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkInterface = "CreateNetworkInterface" + +// CreateNetworkInterfaceRequest generates a request for the CreateNetworkInterface operation. +func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkInterfaceOutput{} + req.Data = output + return +} + +// Creates a network interface in the specified subnet. +// +// For more information about network interfaces, see Elastic Network Interfaces +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlacementGroup = "CreatePlacementGroup" + +// CreatePlacementGroupRequest generates a request for the CreatePlacementGroup operation. +func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { + op := &request.Operation{ + Name: opCreatePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlacementGroupOutput{} + req.Data = output + return +} + +// Creates a placement group that you launch cluster instances into. You must +// give the group a name that's unique within the scope of your account. +// +// For more information about placement groups and cluster instances, see Cluster +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReservedInstancesListing = "CreateReservedInstancesListing" + +// CreateReservedInstancesListingRequest generates a request for the CreateReservedInstancesListing operation. 
+func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCreateReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReservedInstancesListingOutput{} + req.Data = output + return +} + +// Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved +// Instance Marketplace. You can submit one Reserved Instance listing at a time. +// To get a list of your Reserved Instances, you can use the DescribeReservedInstances +// operation. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// To sell your Reserved Instances, you must first register as a seller in +// the Reserved Instance Marketplace. After completing the registration process, +// you can create a Reserved Instance Marketplace listing of some or all of +// your Reserved Instances, and specify the upfront price to receive for them. +// Your Reserved Instance listings then become available for purchase. To view +// the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings +// operation. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCreateRoute = "CreateRoute" + +// CreateRouteRequest generates a request for the CreateRoute operation. +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { + op := &request.Operation{ + Name: opCreateRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteOutput{} + req.Data = output + return +} + +// Creates a route in a route table within a VPC. +// +// You must specify one of the following targets: Internet gateway or virtual +// private gateway, NAT instance, VPC peering connection, or network interface. +// +// When determining how to route traffic, we use the route with the most specific +// match. For example, let's say the traffic is destined for 192.0.2.3, and +// the route table includes the following two routes: +// +// 192.0.2.0/24 (goes to some target A) +// +// 192.0.2.0/28 (goes to some target B) +// +// Both routes apply to the traffic destined for 192.0.2.3. However, the +// second route in the list covers a smaller number of IP addresses and is therefore +// more specific, so we use that route to determine where to target the traffic. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
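A sketch of adding a default route that sends 0.0.0.0/0 through an Internet gateway, same assumptions and placeholder IDs:

    if _, err := svc.CreateRoute(&ec2.CreateRouteInput{
        RouteTableId:         aws.String("rtb-1a2b3c4d"), // placeholder route table
        DestinationCidrBlock: aws.String("0.0.0.0/0"),    // default route
        GatewayId:            aws.String("igw-1a2b3c4d"), // placeholder Internet gateway
    }); err != nil {
        log.Fatal(err)
    }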
+func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateRouteTable = "CreateRouteTable" + +// CreateRouteTableRequest generates a request for the CreateRouteTable operation. +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { + op := &request.Operation{ + Name: opCreateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteTableOutput{} + req.Data = output + return +} + +// Creates a route table for the specified VPC. After you create a route table, +// you can add routes and associate the table with a subnet. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opCreateSecurityGroup = "CreateSecurityGroup" + +// CreateSecurityGroupRequest generates a request for the CreateSecurityGroup operation. +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a security group. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// EC2-Classic: You can have up to 500 security groups. +// +// EC2-VPC: You can create up to 100 security groups per VPC. +// +// When you create a security group, you specify a friendly name of your choice. +// You can have a security group for use in EC2-Classic with the same name as +// a security group for use in a VPC. However, you can't have two security groups +// for use in EC2-Classic with the same name or two security groups for use +// in a VPC with the same name. +// +// You have a default security group for use in EC2-Classic and a default security +// group for use in your VPC. If you don't specify a security group when you +// launch an instance, the instance is launched into the appropriate default +// security group. A default security group includes a default rule that grants +// instances unrestricted network access to each other. +// +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. 
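A sketch that creates a VPC security group and then opens SSH on it with AuthorizeSecurityGroupIngress, same assumptions; the CIDR is a documentation address range used as a placeholder:

    sg, err := svc.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
        GroupName:   aws.String("web-servers"),
        Description: aws.String("example security group"),
        VpcId:       aws.String("vpc-1a2b3c4d"), // placeholder VPC
    })
    if err != nil {
        log.Fatal(err)
    }
    if _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
        GroupId:    sg.GroupId,
        IpProtocol: aws.String("tcp"),
        FromPort:   aws.Int64(22),
        ToPort:     aws.Int64(22),
        CidrIp:     aws.String("203.0.113.0/24"), // placeholder source range
    }); err != nil {
        log.Fatal(err)
    }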
+func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &Snapshot{} + req.Data = output + return +} + +// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use +// snapshots for backups, to make copies of EBS volumes, and to save data before +// shutting down an instance. +// +// When a snapshot is created, any AWS Marketplace product codes that are associated +// with the source volume are propagated to the snapshot. +// +// You can take a snapshot of an attached volume that is in use. However, snapshots +// only capture data that has been written to your EBS volume at the time the +// snapshot command is issued; this may exclude any data that has been cached +// by any applications or the operating system. If you can pause any file systems +// on the volume long enough to take a snapshot, your snapshot should be complete. +// However, if you cannot pause all file writes to the volume, you should unmount +// the volume from within the instance, issue the snapshot command, and then +// remount the volume to ensure a consistent and complete snapshot. You may +// remount and use your volume while the snapshot status is pending. +// +// To create a snapshot for EBS volumes that serve as root devices, you should +// stop the instance before taking the snapshot. +// +// Snapshots that are taken from encrypted volumes are automatically encrypted. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. Your encrypted volumes and any associated snapshots always remain +// protected. +// +// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" + +// CreateSpotDatafeedSubscriptionRequest generates a request for the CreateSpotDatafeedSubscription operation. +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Creates a data feed for Spot instances, enabling you to view Spot instance +// usage logs. You can create one data feed per AWS account. 
For more information, +// see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateSubnet = "CreateSubnet" + +// CreateSubnetRequest generates a request for the CreateSubnet operation. +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { + op := &request.Operation{ + Name: opCreateSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSubnetOutput{} + req.Data = output + return +} + +// Creates a subnet in an existing VPC. +// +// When you create each subnet, you provide the VPC ID and the CIDR block you +// want for the subnet. After you create a subnet, you can't change its CIDR +// block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming +// you want only a single subnet in the VPC), or a subset of the VPC's CIDR +// block. If you create more than one subnet in a VPC, the subnets' CIDR blocks +// must not overlap. The smallest subnet (and VPC) you can create uses a /28 +// netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP +// addresses). +// +// AWS reserves both the first four and the last IP address in each subnet's +// CIDR block. They're not available for use. +// +// If you add more than one subnet to a VPC, they're set up in a star topology +// with a logical router in the middle. +// +// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP +// address doesn't change if you stop and restart the instance (unlike a similar +// instance launched outside a VPC, which gets a new IP address when restarted). +// It's therefore possible to have a subnet with no running instances (they're +// all stopped), but no remaining IP addresses available. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a request for the CreateTags operation. +func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified Amazon EC2 resource +// or resources. Each resource can have a maximum of 10 tags. Each tag consists +// of a key and optional value. Tag keys must be unique per resource. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. 
For more information about +// creating IAM policies that control users' access to resources based on tags, +// see Supported Resource-Level Permissions for Amazon EC2 API Actions (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateVolume = "CreateVolume" + +// CreateVolumeRequest generates a request for the CreateVolume operation. +func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) { + op := &request.Operation{ + Name: opCreateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &Volume{} + req.Data = output + return +} + +// Creates an EBS volume that can be attached to an instance in the same Availability +// Zone. The volume is created in the regional endpoint that you send the HTTP +// request to. For more information see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). +// +// You can create a new empty volume or restore a volume from an EBS snapshot. +// Any AWS Marketplace product codes from the snapshot are propagated to the +// volume. +// +// You can create encrypted volumes with the Encrypted parameter. Encrypted +// volumes may only be attached to instances that support Amazon EBS encryption. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpc = "CreateVpc" + +// CreateVpcRequest generates a request for the CreateVpc operation. +func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) { + op := &request.Operation{ + Name: opCreateVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcOutput{} + req.Data = output + return +} + +// Creates a VPC with the specified CIDR block. +// +// The smallest VPC you can create uses a /28 netmask (16 IP addresses), and +// the largest uses a /16 netmask (65,536 IP addresses). To help you decide +// how big to make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// By default, each instance you launch in the VPC has the default DHCP options, +// which includes only a default DNS server that we provide (AmazonProvidedDNS). +// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. 
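A sketch of creating a VPC and a first /24 subnet inside it, same assumptions:

    vpcOut, err := svc.CreateVpc(&ec2.CreateVpcInput{
        CidrBlock: aws.String("10.0.0.0/16"),
    })
    if err != nil {
        log.Fatal(err)
    }
    if _, err := svc.CreateSubnet(&ec2.CreateSubnetInput{
        VpcId:     vpcOut.Vpc.VpcId,
        CidrBlock: aws.String("10.0.0.0/24"),
    }); err != nil {
        log.Fatal(err)
    }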
+func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcEndpoint = "CreateVpcEndpoint" + +// CreateVpcEndpointRequest generates a request for the CreateVpcEndpoint operation. +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcEndpointOutput{} + req.Data = output + return +} + +// Creates a VPC endpoint for a specified AWS service. An endpoint enables you +// to create a private connection between your VPC and another AWS service in +// your account. You can specify an endpoint policy to attach to the endpoint +// that will control access to the service from your VPC. You can also specify +// the VPC route tables that use the endpoint. +// +// Currently, only endpoints to Amazon S3 are supported. +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" + +// CreateVpcPeeringConnectionRequest generates a request for the CreateVpcPeeringConnection operation. +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Requests a VPC peering connection between two VPCs: a requester VPC that +// you own and a peer VPC with which to create the connection. The peer VPC +// can belong to another AWS account. The requester VPC and peer VPC cannot +// have overlapping CIDR blocks. +// +// The owner of the peer VPC must accept the peering request to activate the +// peering connection. The VPC peering connection request expires after 7 days, +// after which it cannot be accepted or rejected. +// +// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR +// blocks results in the VPC peering connection having a status of failed. +func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnection = "CreateVpnConnection" + +// CreateVpnConnectionRequest generates a request for the CreateVpnConnection operation. +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionOutput{} + req.Data = output + return +} + +// Creates a VPN connection between an existing virtual private gateway and +// a VPN customer gateway. The only supported connection type is ipsec.1. 
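// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// creating an S3 VPC endpoint as described above, assuming an initialized
// *EC2 client svc; the IDs are placeholders and the service name follows the
// com.amazonaws.<region>.s3 pattern.
//
//	_, err := svc.CreateVpcEndpoint(&CreateVpcEndpointInput{
//		VpcId:         aws.String("vpc-11111111"),
//		ServiceName:   aws.String("com.amazonaws.us-east-1.s3"),
//		RouteTableIds: []*string{aws.String("rtb-22222222")},
//	})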
+// +// The response includes information that you need to give to your network +// administrator to configure your customer gateway. +// +// We strongly recommend that you use HTTPS when calling this operation because +// the response contains sensitive cryptographic information for configuring +// your customer gateway. +// +// If you decide to shut down your VPN connection for any reason and later +// create a new VPN connection, you must reconfigure your customer gateway with +// the new information returned from this call. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" + +// CreateVpnConnectionRouteRequest generates a request for the CreateVpnConnectionRoute operation. +func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opCreateVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Creates a static route associated with a VPN connection between an existing +// virtual private gateway and a VPN customer gateway. The static route allows +// traffic to be routed from the virtual private gateway to the VPN customer +// gateway. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnGateway = "CreateVpnGateway" + +// CreateVpnGatewayRequest generates a request for the CreateVpnGateway operation. +func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { + op := &request.Operation{ + Name: opCreateVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnGatewayOutput{} + req.Data = output + return +} + +// Creates a virtual private gateway. A virtual private gateway is the endpoint +// on the VPC side of your VPN connection. You can create a virtual private +// gateway before creating the VPC itself. +// +// For more information about virtual private gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. 
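// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// wiring up the VPN primitives documented above, assuming an initialized *EC2
// client svc, placeholder IDs, and field names taken from the corresponding
// *Input structs. Error handling is omitted for brevity.
//
//	gwOut, _ := svc.CreateVpnGateway(&CreateVpnGatewayInput{
//		Type: aws.String("ipsec.1"),
//	})
//	connOut, _ := svc.CreateVpnConnection(&CreateVpnConnectionInput{
//		Type:              aws.String("ipsec.1"), // the only supported type
//		CustomerGatewayId: aws.String("cgw-33333333"),
//		VpnGatewayId:      gwOut.VpnGateway.VpnGatewayId,
//		Options:           &VpnConnectionOptionsSpecification{StaticRoutesOnly: aws.Bool(true)},
//	})
//	// For a statically routed connection, add the on-premises CIDR as a route.
//	_, _ = svc.CreateVpnConnectionRoute(&CreateVpnConnectionRouteInput{
//		VpnConnectionId:      connOut.VpnConnection.VpnConnectionId,
//		DestinationCidrBlock: aws.String("192.168.0.0/16"),
//	})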
+func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomerGateway = "DeleteCustomerGateway" + +// DeleteCustomerGatewayRequest generates a request for the DeleteCustomerGateway operation. +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCustomerGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified customer gateway. You must delete the VPN connection +// before you can delete the customer gateway. +func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDhcpOptions = "DeleteDhcpOptions" + +// DeleteDhcpOptionsRequest generates a request for the DeleteDhcpOptions operation. +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDeleteDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDhcpOptionsOutput{} + req.Data = output + return +} + +// Deletes the specified set of DHCP options. You must disassociate the set +// of DHCP options before you can delete it. You can disassociate the set of +// DHCP options by associating either a new set of options or the default set +// of options with the VPC. +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFlowLogs = "DeleteFlowLogs" + +// DeleteFlowLogsRequest generates a request for the DeleteFlowLogs operation. +func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { + op := &request.Operation{ + Name: opDeleteFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteFlowLogsOutput{} + req.Data = output + return +} + +// Deletes one or more flow logs. +func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInternetGateway = "DeleteInternetGateway" + +// DeleteInternetGatewayRequest generates a request for the DeleteInternetGateway operation. +func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) { + op := &request.Operation{ + Name: opDeleteInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInternetGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified Internet gateway. You must detach the Internet gateway +// from the VPC before you can delete it. 
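// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// detaching and then deleting an Internet gateway, per the note above.
// Assumes an initialized *EC2 client svc and placeholder IDs;
// DetachInternetGateway is assumed to be defined later in this file.
//
//	_, _ = svc.DetachInternetGateway(&DetachInternetGatewayInput{
//		InternetGatewayId: aws.String("igw-44444444"),
//		VpcId:             aws.String("vpc-11111111"),
//	})
//	_, err := svc.DeleteInternetGateway(&DeleteInternetGatewayInput{
//		InternetGatewayId: aws.String("igw-44444444"),
//	})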
+func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteKeyPair = "DeleteKeyPair" + +// DeleteKeyPairRequest generates a request for the DeleteKeyPair operation. +func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) { + op := &request.Operation{ + Name: opDeleteKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteKeyPairOutput{} + req.Data = output + return +} + +// Deletes the specified key pair, by removing the public key from Amazon EC2. +func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAcl = "DeleteNetworkAcl" + +// DeleteNetworkAclRequest generates a request for the DeleteNetworkAcl operation. +func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkAclOutput{} + req.Data = output + return +} + +// Deletes the specified network ACL. You can't delete the ACL if it's associated +// with any subnets. You can't delete the default network ACL. +func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" + +// DeleteNetworkAclEntryRequest generates a request for the DeleteNetworkAclEntry operation. +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkAclEntryOutput{} + req.Data = output + return +} + +// Deletes the specified ingress or egress entry (rule) from the specified network +// ACL. +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkInterface = "DeleteNetworkInterface" + +// DeleteNetworkInterfaceRequest generates a request for the DeleteNetworkInterface operation. +func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNetworkInterfaceOutput{} + req.Data = output + return +} + +// Deletes the specified network interface. You must detach the network interface +// before you can delete it. 
+func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlacementGroup = "DeletePlacementGroup" + +// DeletePlacementGroupRequest generates a request for the DeletePlacementGroup operation. +func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) { + op := &request.Operation{ + Name: opDeletePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePlacementGroupOutput{} + req.Data = output + return +} + +// Deletes the specified placement group. You must terminate all instances in +// the placement group before you can delete the placement group. For more information +// about placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRoute = "DeleteRoute" + +// DeleteRouteRequest generates a request for the DeleteRoute operation. +func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { + op := &request.Operation{ + Name: opDeleteRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRouteOutput{} + req.Data = output + return +} + +// Deletes the specified route from the specified route table. +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRouteTable = "DeleteRouteTable" + +// DeleteRouteTableRequest generates a request for the DeleteRouteTable operation. +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRouteTableOutput{} + req.Data = output + return +} + +// Deletes the specified route table. You must disassociate the route table +// from any subnets before you can delete it. You can't delete the main route +// table. +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSecurityGroup = "DeleteSecurityGroup" + +// DeleteSecurityGroupRequest generates a request for the DeleteSecurityGroup operation. +func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a security group. 
+// +// If you attempt to delete a security group that is associated with an instance, +// or is referenced by another security group, the operation fails with InvalidGroup.InUse +// in EC2-Classic or DependencyViolation in EC2-VPC. +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot. +// +// When you make periodic snapshots of a volume, the snapshots are incremental, +// and only the blocks on the device that have changed since your last snapshot +// are saved in the new snapshot. When you delete a snapshot, only the data +// not needed for any other snapshot is removed. So regardless of which prior +// snapshots have been deleted, all active snapshots will have access to all +// the information needed to restore the volume. +// +// You cannot delete a snapshot of the root device of an EBS volume used by +// a registered AMI. You must first de-register the AMI before you can delete +// the snapshot. +// +// For more information, see Deleting an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" + +// DeleteSpotDatafeedSubscriptionRequest generates a request for the DeleteSpotDatafeedSubscription operation. +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Deletes the data feed for Spot instances. +func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubnet = "DeleteSubnet" + +// DeleteSubnetRequest generates a request for the DeleteSubnet operation. +func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { + op := &request.Operation{ + Name: opDeleteSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSubnetOutput{} + req.Data = output + return +} + +// Deletes the specified subnet. 
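// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// deleting a snapshot that is not referenced by a registered AMI, assuming an
// initialized *EC2 client svc and a placeholder snapshot ID. Because
// snapshots are incremental, deleting one never breaks restores from the
// snapshots that remain.
//
//	_, err := svc.DeleteSnapshot(&DeleteSnapshotInput{
//		SnapshotId: aws.String("snap-55555555"),
//	})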
You must terminate all running instances in +// the subnet before you can delete the subnet. +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified set of tags from the specified set of resources. This +// call is designed to follow a DescribeTags request. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a request for the DeleteVolume operation. +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// Deletes the specified EBS volume. The volume must be in the available state +// (not attached to an instance). +// +// The volume may remain in the deleting state for several minutes. +// +// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpc = "DeleteVpc" + +// DeleteVpcRequest generates a request for the DeleteVpc operation. +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { + op := &request.Operation{ + Name: opDeleteVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcOutput{} + req.Data = output + return +} + +// Deletes the specified VPC. You must detach or delete all gateways and resources +// that are associated with the VPC before you can delete it. For example, you +// must terminate all instances running in the VPC, delete all security groups +// associated with the VPC (except the default one), delete all route tables +// associated with the VPC (except the default one), and so on. +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcEndpoints = "DeleteVpcEndpoints" + +// DeleteVpcEndpointsRequest generates a request for the DeleteVpcEndpoints operation. 
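// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// removing a tag from a resource, the inverse of the CreateTags call
// documented earlier. Assumes an initialized *EC2 client svc and a
// placeholder instance ID.
//
//	_, err := svc.DeleteTags(&DeleteTagsInput{
//		Resources: []*string{aws.String("i-66666666")},
//		Tags:      []*Tag{{Key: aws.String("Environment"), Value: aws.String("staging")}},
//	})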
+func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcEndpointsOutput{} + req.Data = output + return +} + +// Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes +// the endpoint routes in the route tables that were associated with the endpoint. +func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a request for the DeleteVpcPeeringConnection operation. +func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Deletes a VPC peering connection. Either the owner of the requester VPC or +// the owner of the peer VPC can delete the VPC peering connection if it's in +// the active state. The owner of the requester VPC can delete a VPC peering +// connection in the pending-acceptance state. +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnection = "DeleteVpnConnection" + +// DeleteVpnConnectionRequest generates a request for the DeleteVpnConnection operation. +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnConnectionOutput{} + req.Data = output + return +} + +// Deletes the specified VPN connection. +// +// If you're deleting the VPC and its associated components, we recommend that +// you detach the virtual private gateway from the VPC and delete the VPC before +// deleting the VPN connection. If you believe that the tunnel credentials for +// your VPN connection have been compromised, you can delete the VPN connection +// and create a new one that has new keys, without needing to delete the VPC +// or virtual private gateway. If you create a new VPN connection, you must +// reconfigure the customer gateway using the new configuration information +// returned with the new VPN connection ID. +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" + +// DeleteVpnConnectionRouteRequest generates a request for the DeleteVpnConnectionRoute operation. 
+func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Deletes the specified static route associated with a VPN connection between +// an existing virtual private gateway and a VPN customer gateway. The static +// route allows traffic to be routed from the virtual private gateway to the +// VPN customer gateway. +func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnGateway = "DeleteVpnGateway" + +// DeleteVpnGatewayRequest generates a request for the DeleteVpnGateway operation. +func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpnGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified virtual private gateway. We recommend that before you +// delete a virtual private gateway, you detach it from the VPC and delete the +// VPN connection. Note that you don't need to delete the virtual private gateway +// if you plan to delete and recreate the VPN connection between your VPC and +// your network. +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterImage = "DeregisterImage" + +// DeregisterImageRequest generates a request for the DeregisterImage operation. +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { + op := &request.Operation{ + Name: opDeregisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterImageOutput{} + req.Data = output + return +} + +// Deregisters the specified AMI. After you deregister an AMI, it can't be used +// to launch new instances. +// +// This command does not delete the AMI. +func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Describes attributes of your AWS account. 
The following are the supported +// account attributes: +// +// supported-platforms: Indicates whether your account can launch instances +// into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// +// default-vpc: The ID of the default VPC for your account, or none. +// +// max-instances: The maximum number of On-Demand instances that you can +// run. +// +// vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. +// +// max-elastic-ips: The maximum number of Elastic IP addresses that you can +// allocate for use with EC2-Classic. +// +// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-VPC. +func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAddresses = "DescribeAddresses" + +// DescribeAddressesRequest generates a request for the DescribeAddresses operation. +func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { + op := &request.Operation{ + Name: opDescribeAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAddressesOutput{} + req.Data = output + return +} + +// Describes one or more of your Elastic IP addresses. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityZones = "DescribeAvailabilityZones" + +// DescribeAvailabilityZonesRequest generates a request for the DescribeAvailabilityZones operation. +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityZonesOutput{} + req.Data = output + return +} + +// Describes one or more of the Availability Zones that are available to you. +// The results include zones only for the region you're currently using. If +// there is an event impacting an Availability Zone, you can use this request +// to view the state and any provided message for that Availability Zone. +// +// For more information, see Regions and Availability Zones (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBundleTasks = "DescribeBundleTasks" + +// DescribeBundleTasksRequest generates a request for the DescribeBundleTasks operation. 
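// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// querying two of the account attributes listed above, assuming an
// initialized *EC2 client svc.
//
//	out, err := svc.DescribeAccountAttributes(&DescribeAccountAttributesInput{
//		AttributeNames: []*string{
//			aws.String("supported-platforms"),
//			aws.String("default-vpc"),
//		},
//	})
//	if err != nil {
//		// handle error
//	}
//	// out.AccountAttributes holds one entry per requested attribute.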
+func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { + op := &request.Operation{ + Name: opDescribeBundleTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBundleTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBundleTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your bundling tasks. +// +// Completed bundle tasks are listed for only a limited time. If your bundle +// task is no longer in the list, you can still register an AMI from it. Just +// use RegisterImage with the Amazon S3 bucket name and image manifest name +// you provided to the bundle task. +func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" + +// DescribeClassicLinkInstancesRequest generates a request for the DescribeClassicLinkInstances operation. +func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { + op := &request.Operation{ + Name: opDescribeClassicLinkInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClassicLinkInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClassicLinkInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your linked EC2-Classic instances. This request +// only returns information about EC2-Classic instances linked to a VPC through +// ClassicLink; you cannot use this request to return information about other +// instances. +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConversionTasks = "DescribeConversionTasks" + +// DescribeConversionTasksRequest generates a request for the DescribeConversionTasks operation. +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { + op := &request.Operation{ + Name: opDescribeConversionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConversionTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConversionTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your conversion tasks. For more information, see +// Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCustomerGateways = "DescribeCustomerGateways" + +// DescribeCustomerGatewaysRequest generates a request for the DescribeCustomerGateways operation. 
+func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCustomerGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCustomerGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCustomerGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN customer gateways. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDhcpOptions = "DescribeDhcpOptions" + +// DescribeDhcpOptionsRequest generates a request for the DescribeDhcpOptions operation. +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDhcpOptionsOutput{} + req.Data = output + return +} + +// Describes one or more of your DHCP options sets. +// +// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your export tasks. +func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFlowLogs = "DescribeFlowLogs" + +// DescribeFlowLogsRequest generates a request for the DescribeFlowLogs operation. +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { + op := &request.Operation{ + Name: opDescribeFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFlowLogsOutput{} + req.Data = output + return +} + +// Describes one or more flow logs. To view the information in your flow logs +// (the log streams for the network interfaces), you must use the CloudWatch +// Logs console or the CloudWatch Logs API. 
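// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// listing flow logs, assuming an initialized *EC2 client svc. As noted above,
// the log records themselves live in CloudWatch Logs; this call only returns
// the flow log descriptions.
//
//	out, err := svc.DescribeFlowLogs(&DescribeFlowLogsInput{})
//	if err != nil {
//		// handle error
//	}
//	// out.FlowLogs describes each flow log (ID, resource, log group, status).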
+func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImageAttribute = "DescribeImageAttribute" + +// DescribeImageAttributeRequest generates a request for the DescribeImageAttribute operation. +func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImageAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a request for the DescribeImages operation. +func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { + op := &request.Operation{ + Name: opDescribeImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImagesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImagesOutput{} + req.Data = output + return +} + +// Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. +// Images available to you include public images, private images that you own, +// and private images owned by other AWS accounts but for which you have explicit +// launch permissions. +// +// Deregistered images are included in the returned results for an unspecified +// interval after deregistration. +func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImportImageTasks = "DescribeImportImageTasks" + +// DescribeImportImageTasksRequest generates a request for the DescribeImportImageTasks operation. +func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeImportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportImageTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImportImageTasksOutput{} + req.Data = output + return +} + +// Displays details about an import virtual machine or import snapshot tasks +// that are already created. +func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) { + req, out := c.DescribeImportImageTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" + +// DescribeImportSnapshotTasksRequest generates a request for the DescribeImportSnapshotTasks operation. 
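// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// restricting DescribeImages to your own AMIs with a filter, assuming an
// initialized *EC2 client svc; filter names follow the EC2 API (for example
// "name"), and the image name pattern is a placeholder.
//
//	out, err := svc.DescribeImages(&DescribeImagesInput{
//		Owners: []*string{aws.String("self")},
//		Filters: []*Filter{
//			{Name: aws.String("name"), Values: []*string{aws.String("my-image-*")}},
//		},
//	})
//	if err != nil {
//		// handle error
//	}
//	// out.Images lists the matching AMIs.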
+func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) { + op := &request.Operation{ + Name: opDescribeImportSnapshotTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportSnapshotTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImportSnapshotTasksOutput{} + req.Data = output + return +} + +// Describes your import snapshot tasks. +func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) { + req, out := c.DescribeImportSnapshotTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceAttribute = "DescribeInstanceAttribute" + +// DescribeInstanceAttributeRequest generates a request for the DescribeInstanceAttribute operation. +func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified instance. You can specify +// only one attribute at a time. Valid attribute values are: instanceType | +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | +// groupSet | ebsOptimized | sriovNetSupport +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceStatus = "DescribeInstanceStatus" + +// DescribeInstanceStatusRequest generates a request for the DescribeInstanceStatus operation. +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceStatusOutput{} + req.Data = output + return +} + +// Describes the status of one or more instances. +// +// Instance status includes the following components: +// +// Status checks - Amazon EC2 performs status checks on running EC2 instances +// to identify hardware and software issues. For more information, see Status +// Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// or terminate) for your instances related to hardware issues, software updates, +// or system maintenance. 
For more information, see Scheduled Events for Your +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Instance state - You can manage your instances from the moment you launch +// them through their termination. For more information, see Instance Lifecycle +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstanceStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstanceStatusOutput), lastPage) + }) +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a request for the DescribeInstances operation. +func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your instances. +// +// If you specify one or more instance IDs, Amazon EC2 returns information +// for those instances. If you do not specify instance IDs, Amazon EC2 returns +// information for all relevant instances. If you specify an instance ID that +// is not valid, an error is returned. If you specify an instance that you do +// not own, it is not included in the returned results. +// +// Recently terminated instances might appear in the returned results. This +// interval is usually less than one hour. +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstancesOutput), lastPage) + }) +} + +const opDescribeInternetGateways = "DescribeInternetGateways" + +// DescribeInternetGatewaysRequest generates a request for the DescribeInternetGateways operation. 
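// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// paginating over all running instances with DescribeInstancesPages, assuming
// an initialized *EC2 client svc; "instance-state-name" is a standard EC2
// filter.
//
//	input := &DescribeInstancesInput{
//		Filters: []*Filter{
//			{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
//		},
//	}
//	err := svc.DescribeInstancesPages(input, func(page *DescribeInstancesOutput, lastPage bool) bool {
//		// page.Reservations holds the instances for this page;
//		// return true to keep fetching pages.
//		return true
//	})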
+func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInternetGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInternetGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your Internet gateways. +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKeyPairs = "DescribeKeyPairs" + +// DescribeKeyPairsRequest generates a request for the DescribeKeyPairs operation. +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { + op := &request.Operation{ + Name: opDescribeKeyPairs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyPairsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyPairsOutput{} + req.Data = output + return +} + +// Describes one or more of your key pairs. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMovingAddresses = "DescribeMovingAddresses" + +// DescribeMovingAddressesRequest generates a request for the DescribeMovingAddresses operation. +func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) { + op := &request.Operation{ + Name: opDescribeMovingAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMovingAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMovingAddressesOutput{} + req.Data = output + return +} + +// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, +// or that are being restored to the EC2-Classic platform. This request does +// not return information about any other Elastic IP addresses in your account. +func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) { + req, out := c.DescribeMovingAddressesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkAcls = "DescribeNetworkAcls" + +// DescribeNetworkAclsRequest generates a request for the DescribeNetworkAcls operation. +func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkAcls, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkAclsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkAclsOutput{} + req.Data = output + return +} + +// Describes one or more of your network ACLs. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" + +// DescribeNetworkInterfaceAttributeRequest generates a request for the DescribeNetworkInterfaceAttribute operation. +func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Describes a network interface attribute. You can specify only one attribute +// at a time. +func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" + +// DescribeNetworkInterfacesRequest generates a request for the DescribeNetworkInterfaces operation. +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfacesOutput{} + req.Data = output + return +} + +// Describes one or more of your network interfaces. +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + err := req.Send() + return out, err +} + +const opDescribePlacementGroups = "DescribePlacementGroups" + +// DescribePlacementGroupsRequest generates a request for the DescribePlacementGroups operation. +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { + op := &request.Operation{ + Name: opDescribePlacementGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlacementGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePlacementGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your placement groups. For more information about +// placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribePrefixLists = "DescribePrefixLists" + +// DescribePrefixListsRequest generates a request for the DescribePrefixLists operation. 
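// Example (editorial sketch, not part of the vendored aws-sdk-go source):
// reading a single network interface attribute, assuming an initialized *EC2
// client svc and a placeholder interface ID. Only one attribute may be
// requested per call.
//
//	out, err := svc.DescribeNetworkInterfaceAttribute(&DescribeNetworkInterfaceAttributeInput{
//		NetworkInterfaceId: aws.String("eni-77777777"),
//		Attribute:          aws.String("sourceDestCheck"),
//	})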
+func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { + op := &request.Operation{ + Name: opDescribePrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePrefixListsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePrefixListsOutput{} + req.Data = output + return +} + +// Describes available AWS services in a prefix list format, which includes +// the prefix list name and prefix list ID of the service and the IP address +// range for the service. A prefix list ID is required for creating an outbound +// security group rule that allows traffic from a VPC to access an AWS service +// through a VPC endpoint. +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRegions = "DescribeRegions" + +// DescribeRegionsRequest generates a request for the DescribeRegions operation. +func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { + op := &request.Operation{ + Name: opDescribeRegions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRegionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRegionsOutput{} + req.Data = output + return +} + +// Describes one or more regions that are currently available to you. +// +// For a list of the regions supported by Amazon EC2, see Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstances = "DescribeReservedInstances" + +// DescribeReservedInstancesRequest generates a request for the DescribeReservedInstances operation. +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of the Reserved Instances that you purchased. +// +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" + +// DescribeReservedInstancesListingsRequest generates a request for the DescribeReservedInstancesListings operation. 
+func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesListings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesListingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesListingsOutput{} + req.Data = output + return +} + +// Describes your account's Reserved Instance listings in the Reserved Instance +// Marketplace. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// As a seller, you choose to list some or all of your Reserved Instances, +// and you specify the upfront price to receive for them. Your Reserved Instances +// are then listed in the Reserved Instance Marketplace and are available for +// purchase. +// +// As a buyer, you specify the configuration of the Reserved Instance to purchase, +// and the Marketplace matches what you're searching for with what's available. +// The Marketplace first sells the lowest priced Reserved Instances to you, +// and continues to sell available Reserved Instance listings to you until your +// demand is met. You are charged based on the total price of all of the listings +// that you purchase. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" + +// DescribeReservedInstancesModificationsRequest generates a request for the DescribeReservedInstancesModifications operation. +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesModificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesModificationsOutput{} + req.Data = output + return +} + +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification +// requests is returned. If a modification ID is specified, only information +// about the specific modification is returned. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesModificationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage) + }) +} + +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" + +// DescribeReservedInstancesOfferingsRequest generates a request for the DescribeReservedInstancesOfferings operation. +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOfferingsOutput{} + req.Data = output + return +} + +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period +// of time. During that time period, you do not receive insufficient capacity +// errors, and you pay a lower usage rate than the rate charged for On-Demand +// instances for the actual time used. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage) + }) +} + +const opDescribeRouteTables = "DescribeRouteTables" + +// DescribeRouteTablesRequest generates a request for the DescribeRouteTables operation. 
+func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRouteTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRouteTablesOutput{} + req.Data = output + return +} + +// Describes one or more of your route tables. +// +// Each subnet in your VPC must be associated with a route table. If a subnet +// is not explicitly associated with any route table, it is implicitly associated +// with the main route table. This command does not return the subnet ID for +// implicit associations. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSecurityGroups = "DescribeSecurityGroups" + +// DescribeSecurityGroupsRequest generates a request for the DescribeSecurityGroups operation. +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your security groups. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" + +// DescribeSnapshotAttributeRequest generates a request for the DescribeSnapshotAttribute operation. +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified snapshot. You can specify +// only one attribute at a time. +// +// For more information about EBS snapshots, see Amazon EBS Snapshots in the +// Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Describes one or more of the EBS snapshots available to you. Available snapshots +// include public snapshots available for any AWS account to launch, private +// snapshots that you own, and private snapshots owned by another AWS account +// but for which you've been given explicit create volume permissions. +// +// The create volume permissions fall into the following categories: +// +// public: The owner of the snapshot granted create volume permissions for +// the snapshot to the all group. All AWS accounts have create volume permissions +// for these snapshots. explicit: The owner of the snapshot granted create +// volume permissions to a specific AWS account. implicit: An AWS account has +// implicit create volume permissions for all snapshots it owns. The list of +// snapshots returned can be modified by specifying snapshot IDs, snapshot owners, +// or AWS accounts with create volume permissions. If no options are specified, +// Amazon EC2 returns all snapshots for which you have create volume permissions. +// +// If you specify one or more snapshot IDs, only snapshots that have the specified +// IDs are returned. If you specify an invalid snapshot ID, an error is returned. +// If you specify a snapshot ID for which you do not have access, it is not +// included in the returned results. +// +// If you specify one or more snapshot owners, only snapshots from the specified +// owners and for which you have access are returned. The results can include +// the AWS account IDs of the specified owners, amazon for snapshots owned by +// Amazon, or self for snapshots that you own. +// +// If you specify a list of restorable users, only snapshots with create snapshot +// permissions for those users are returned. You can specify AWS account IDs +// (if you own the snapshots), self for snapshots for which you own or have +// explicit permissions, or all for public snapshots. +// +// If you are describing a long list of snapshots, you can paginate the output +// to make the list more manageable. The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeSnapshots request +// to retrieve the remaining results. +// +// For more information about EBS snapshots, see Amazon EBS Snapshots in the +// Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSnapshotsOutput), lastPage) + }) +} + +const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" + +// DescribeSpotDatafeedSubscriptionRequest generates a request for the DescribeSpotDatafeedSubscription operation. +func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDescribeSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Describes the data feed for Spot instances. For more information, see Spot +// Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" + +// DescribeSpotFleetInstancesRequest generates a request for the DescribeSpotFleetInstances operation. +func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetInstancesOutput{} + req.Data = output + return +} + +// Describes the running instances for the specified Spot fleet. +func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" + +// DescribeSpotFleetRequestHistoryRequest generates a request for the DescribeSpotFleetRequestHistory operation. +func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestHistoryOutput{} + req.Data = output + return +} + +// Describes the events for the specified Spot fleet request during the specified +// time. 
+// +// Spot fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. +func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" + +// DescribeSpotFleetRequestsRequest generates a request for the DescribeSpotFleetRequests operation. +func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Describes your Spot fleet requests. +func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" + +// DescribeSpotInstanceRequestsRequest generates a request for the DescribeSpotInstanceRequests operation. +func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Describes the Spot instance requests that belong to your account. Spot instances +// are instances that Amazon EC2 launches when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can use DescribeSpotInstanceRequests to find a running Spot instance +// by examining the response. If the status of the Spot instance is fulfilled, +// the instance ID appears in the response and contains the identifier of the +// instance. Alternatively, you can use DescribeInstances with a filter to look +// for instances where the instance lifecycle is spot. +func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" + +// DescribeSpotPriceHistoryRequest generates a request for the DescribeSpotPriceHistory operation. 
+func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotPriceHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotPriceHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotPriceHistoryOutput{} + req.Data = output + return +} + +// Describes the Spot price history. The prices returned are listed in chronological +// order, from the oldest to the most recent, for up to the past 90 days. For +// more information, see Spot Instance Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you specify a start and end time, this operation returns the prices +// of the instance types within the time range that you specified and the time +// when the price changed. The price is valid within the time period that you +// specified; the response merely indicates the last time that the price changed. +func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotPriceHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage) + }) +} + +const opDescribeSubnets = "DescribeSubnets" + +// DescribeSubnetsRequest generates a request for the DescribeSubnets operation. +func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { + op := &request.Operation{ + Name: opDescribeSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubnetsOutput{} + req.Data = output + return +} + +// Describes one or more of your subnets. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. 
+func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes one or more of the tags for your EC2 resources. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTagsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTagsOutput), lastPage) + }) +} + +const opDescribeVolumeAttribute = "DescribeVolumeAttribute" + +// DescribeVolumeAttributeRequest generates a request for the DescribeVolumeAttribute operation. +func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumeAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified volume. You can specify +// only one attribute at a time. +// +// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon +// Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVolumeStatus = "DescribeVolumeStatus" + +// DescribeVolumeStatusRequest generates a request for the DescribeVolumeStatus operation. +func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) { + op := &request.Operation{ + Name: opDescribeVolumeStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumeStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumeStatusOutput{} + req.Data = output + return +} + +// Describes the status of the specified volumes. Volume status provides the +// result of the checks performed on your volumes to determine events that can +// impair the performance of your volumes. The performance of a volume can be +// affected if an issue occurs on the volume's underlying host. 
If the volume's +// underlying host experiences a power outage or system issue, after the system +// is restored, there could be data inconsistencies on the volume. Volume events +// notify you if this occurs. Volume actions notify you if any action needs +// to be taken in response to the event. +// +// The DescribeVolumeStatus operation provides the following information about +// the specified volumes: +// +// Status: Reflects the current status of the volume. The possible values are +// ok, impaired , warning, or insufficient-data. If all checks pass, the overall +// status of the volume is ok. If the check fails, the overall status is impaired. +// If the status is insufficient-data, then the checks may still be taking place +// on your volume at the time. We recommend that you retry the request. For +// more information on volume status, see Monitoring the Status of Your Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). +// +// Events: Reflect the cause of a volume status and may require you to take +// action. For example, if your volume returns an impaired status, then the +// volume event might be potential-data-inconsistency. This means that your +// volume has been affected by an issue with the underlying host, has all I/O +// operations disabled, and may have inconsistent data. +// +// Actions: Reflect the actions you may have to take in response to an event. +// For example, if the status of the volume is impaired and the volume event +// shows potential-data-inconsistency, then the action shows enable-volume-io. +// This means that you may want to enable the I/O operations for the volume +// by calling the EnableVolumeIO action and then check the volume for data consistency. +// +// Volume status is based on the volume status checks, and does not reflect +// the volume state. Therefore, volume status does not indicate volumes in the +// error state (for example, when a volume is incapable of accepting I/O.) +func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumeStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumeStatusOutput), lastPage) + }) +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes the specified EBS volumes. +// +// If you are describing a long list of volumes, you can paginate the output +// to make the list more manageable. 
The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeVolumes request +// to retrieve the remaining results. +// +// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon +// Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumesOutput), lastPage) + }) +} + +const opDescribeVpcAttribute = "DescribeVpcAttribute" + +// DescribeVpcAttributeRequest generates a request for the DescribeVpcAttribute operation. +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified VPC. You can specify only +// one attribute at a time. +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLink = "DescribeVpcClassicLink" + +// DescribeVpcClassicLinkRequest generates a request for the DescribeVpcClassicLink operation. +func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkOutput{} + req.Data = output + return +} + +// Describes the ClassicLink status of one or more VPCs. +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" + +// DescribeVpcEndpointServicesRequest generates a request for the DescribeVpcEndpointServices operation. +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointServicesOutput{} + req.Data = output + return +} + +// Describes all supported AWS services that can be specified when creating +// a VPC endpoint. 
+func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpoints = "DescribeVpcEndpoints" + +// DescribeVpcEndpointsRequest generates a request for the DescribeVpcEndpoints operation. +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC endpoints. +func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" + +// DescribeVpcPeeringConnectionsRequest generates a request for the DescribeVpcPeeringConnections operation. +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcPeeringConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcPeeringConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcPeeringConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC peering connections. +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcs = "DescribeVpcs" + +// DescribeVpcsRequest generates a request for the DescribeVpcs operation. +func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { + op := &request.Operation{ + Name: opDescribeVpcs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPCs. +func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnConnections = "DescribeVpnConnections" + +// DescribeVpnConnectionsRequest generates a request for the DescribeVpnConnections operation. +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN connections. 
+// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnGateways = "DescribeVpnGateways" + +// DescribeVpnGatewaysRequest generates a request for the DescribeVpnGateways operation. +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVpnGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your virtual private gateways. +// +// For more information about virtual private gateways, see Adding an IPsec +// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDetachClassicLinkVpc = "DetachClassicLinkVpc" + +// DetachClassicLinkVpcRequest generates a request for the DetachClassicLinkVpc operation. +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opDetachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance +// has been unlinked, the VPC security groups are no longer associated with +// it. An instance is automatically unlinked from a VPC when it's stopped. +func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opDetachInternetGateway = "DetachInternetGateway" + +// DetachInternetGatewayRequest generates a request for the DetachInternetGateway operation. +func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { + op := &request.Operation{ + Name: opDetachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachInternetGatewayOutput{} + req.Data = output + return +} + +// Detaches an Internet gateway from a VPC, disabling connectivity between the +// Internet and the VPC. The VPC must not contain any running instances with +// Elastic IP addresses. 
+func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDetachNetworkInterface = "DetachNetworkInterface" + +// DetachNetworkInterfaceRequest generates a request for the DetachNetworkInterface operation. +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDetachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Detaches a network interface from an instance. +func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDetachVolume = "DetachVolume" + +// DetachVolumeRequest generates a request for the DetachVolume operation. +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opDetachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Detaches an EBS volume from an instance. Make sure to unmount any file systems +// on the device within your operating system before detaching the volume. Failure +// to do so results in the volume being stuck in a busy state while detaching. +// +// If an Amazon EBS volume is the root device of an instance, it can't be detached +// while the instance is running. To detach the root volume, stop the instance +// first. +// +// When a volume with an AWS Marketplace product code is detached from an instance, +// the product code is no longer associated with the instance. +// +// For more information, see Detaching an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDetachVpnGateway = "DetachVpnGateway" + +// DetachVpnGatewayRequest generates a request for the DetachVpnGateway operation. +func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { + op := &request.Operation{ + Name: opDetachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachVpnGatewayOutput{} + req.Data = output + return +} + +// Detaches a virtual private gateway from a VPC. You do this if you're planning +// to turn off the VPC and not use it anymore. You can confirm a virtual private +// gateway has been completely detached from a VPC by describing the virtual +// private gateway (any attachments to the virtual private gateway are also +// described). +// +// You must wait for the attachment's state to switch to detached before you +// can delete the VPC or attach a different VPC to the virtual private gateway. 
+func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" + +// DisableVgwRoutePropagationRequest generates a request for the DisableVgwRoutePropagation operation. +func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opDisableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Disables a virtual private gateway (VGW) from propagating routes to a specified +// route table of a VPC. +func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLink = "DisableVpcClassicLink" + +// DisableVpcClassicLinkRequest generates a request for the DisableVpcClassicLink operation. +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC +// that has EC2-Classic instances linked to it. +func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateAddress = "DisassociateAddress" + +// DisassociateAddressRequest generates a request for the DisassociateAddress operation. +func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { + op := &request.Operation{ + Name: opDisassociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateAddressOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from the instance or network interface +// it's associated with. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateRouteTable = "DisassociateRouteTable" + +// DisassociateRouteTableRequest generates a request for the DisassociateRouteTable operation. 
+func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateRouteTableOutput{} + req.Data = output + return +} + +// Disassociates a subnet from a route table. +// +// After you perform this action, the subnet no longer uses the routes in the +// route table. Instead, it uses the routes in the VPC's main route table. For +// more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" + +// EnableVgwRoutePropagationRequest generates a request for the EnableVgwRoutePropagation operation. +func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opEnableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Enables a virtual private gateway (VGW) to propagate routes to the specified +// route table of a VPC. +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opEnableVolumeIO = "EnableVolumeIO" + +// EnableVolumeIORequest generates a request for the EnableVolumeIO operation. +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { + op := &request.Operation{ + Name: opEnableVolumeIO, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVolumeIOInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVolumeIOOutput{} + req.Data = output + return +} + +// Enables I/O operations for a volume that had I/O operations disabled because +// the data on the volume was potentially inconsistent. +func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLink = "EnableVpcClassicLink" + +// EnableVpcClassicLinkRequest generates a request for the EnableVpcClassicLink operation. +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Enables a VPC for ClassicLink. 
You can then link EC2-Classic instances to +// your ClassicLink-enabled VPC to allow communication over private IP addresses. +// You cannot enable your VPC for ClassicLink if any of your VPC's route tables +// have existing routes for address ranges within the 10.0.0.0/8 IP address +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 +// IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opGetConsoleOutput = "GetConsoleOutput" + +// GetConsoleOutputRequest generates a request for the GetConsoleOutput operation. +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { + op := &request.Operation{ + Name: opGetConsoleOutput, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleOutputInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleOutputOutput{} + req.Data = output + return +} + +// Gets the console output for the specified instance. +// +// Instances do not have a physical monitor through which you can view their +// console output. They also lack physical controls that allow you to power +// up, reboot, or shut them down. To allow these actions, we provide them through +// the Amazon EC2 API and command line interface. +// +// Instance console output is buffered and posted shortly after instance boot, +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// which is available for at least one hour after the most recent post. +// +// For Linux instances, the instance console output displays the exact console +// output that would normally be displayed on a physical monitor attached to +// a computer. This output is buffered because the instance produces it and +// then posts it to a store where the instance's owner can retrieve it. +// +// For Windows instances, the instance console output includes output from +// the EC2Config service. +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + err := req.Send() + return out, err +} + +const opGetPasswordData = "GetPasswordData" + +// GetPasswordDataRequest generates a request for the GetPasswordData operation. +func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { + op := &request.Operation{ + Name: opGetPasswordData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPasswordDataInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPasswordDataOutput{} + req.Data = output + return +} + +// Retrieves the encrypted administrator password for an instance running Windows. +// +// The Windows password is generated at boot if the EC2Config service plugin, +// Ec2SetPassword, is enabled. This usually only happens the first time an AMI +// is launched, and then Ec2SetPassword is automatically disabled. The password +// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before +// bundling. +// +// The password is encrypted using the key pair that you specified when you +// launched the instance. 
You must provide the corresponding key pair file. +// +// Password generation and encryption takes a few moments. We recommend that +// you wait up to 15 minutes after launching an instance before trying to retrieve +// the generated password. +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + err := req.Send() + return out, err +} + +const opImportImage = "ImportImage" + +// ImportImageRequest generates a request for the ImportImage operation. +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { + op := &request.Operation{ + Name: opImportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportImageInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportImageOutput{} + req.Data = output + return +} + +// Import single or multi-volume disk images or EBS snapshots into an Amazon +// Machine Image (AMI). +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + err := req.Send() + return out, err +} + +const opImportInstance = "ImportInstance" + +// ImportInstanceRequest generates a request for the ImportInstance operation. +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { + op := &request.Operation{ + Name: opImportInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportInstanceOutput{} + req.Data = output + return +} + +// Creates an import instance task using metadata from the specified disk image. +// ImportInstance only supports single-volume VMs. To import multi-volume VMs, +// use ImportImage. After importing the image, you then upload it using the +// ec2-import-volume command in the EC2 command line tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + err := req.Send() + return out, err +} + +const opImportKeyPair = "ImportKeyPair" + +// ImportKeyPairRequest generates a request for the ImportKeyPair operation. +func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { + op := &request.Operation{ + Name: opImportKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportKeyPairOutput{} + req.Data = output + return +} + +// Imports the public key from an RSA key pair that you created with a third-party +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, +// you create the key pair and give AWS just the public key. The private key +// is never transferred between you and AWS. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. 
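+//
+// A minimal usage sketch (editor's illustration, not upstream documentation):
+// it assumes an *ec2.EC2 client named svc has already been constructed, that
+// publicKey holds the raw bytes of an OpenSSH public key read elsewhere, and
+// that the key name is a placeholder.
+//
+//    resp, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
+//        KeyName:           aws.String("my-imported-key"), // hypothetical name
+//        PublicKeyMaterial: publicKey,                     // []byte public key material
+//    })
+//    if err != nil {
+//        // handle the request error
+//    }
+//    fmt.Println(resp) // prints the key name and fingerprint on success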
+func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opImportSnapshot = "ImportSnapshot" + +// ImportSnapshotRequest generates a request for the ImportSnapshot operation. +func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { + op := &request.Operation{ + Name: opImportSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportSnapshotOutput{} + req.Data = output + return +} + +// Imports a disk into an EBS snapshot. +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opImportVolume = "ImportVolume" + +// ImportVolumeRequest generates a request for the ImportVolume operation. +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { + op := &request.Operation{ + Name: opImportVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportVolumeOutput{} + req.Data = output + return +} + +// Creates an import volume task using metadata from the specified disk image. +// After importing the image, you then upload it using the ec2-import-volume +// command in the Amazon EC2 command-line interface (CLI) tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + err := req.Send() + return out, err +} + +const opModifyImageAttribute = "ModifyImageAttribute" + +// ModifyImageAttributeRequest generates a request for the ModifyImageAttribute operation. +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyImageAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +// +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// product code cannot be made public. +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstanceAttribute = "ModifyInstanceAttribute" + +// ModifyInstanceAttributeRequest generates a request for the ModifyInstanceAttribute operation. 
+func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { + op := &request.Operation{ + Name: opModifyInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyInstanceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified instance. You can specify +// only one attribute at a time. +// +// To modify some attributes, the instance must be stopped. For more information, +// see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" + +// ModifyNetworkInterfaceAttributeRequest generates a request for the ModifyNetworkInterfaceAttribute operation. +func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opModifyNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified network interface attribute. You can specify only +// one attribute at a time. +func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyReservedInstances = "ModifyReservedInstances" + +// ModifyReservedInstancesRequest generates a request for the ModifyReservedInstances operation. +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { + op := &request.Operation{ + Name: opModifyReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReservedInstancesOutput{} + req.Data = output + return +} + +// Modifies the Availability Zone, instance count, instance type, or network +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotAttribute = "ModifySnapshotAttribute" + +// ModifySnapshotAttributeRequest generates a request for the ModifySnapshotAttribute operation. 
+func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifySnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds or removes permission settings for the specified snapshot. You may add +// or remove specified AWS account IDs from a snapshot's list of create volume +// permissions, but you cannot do both in a single API call. If you need to +// both add and remove account IDs for a snapshot, you must use multiple API +// calls. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Snapshots with AWS Marketplace product codes cannot be made public. +func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a request for the ModifySpotFleetRequest operation. +func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySpotFleetRequestOutput{} + req.Data = output + return +} + +// Modifies the specified Spot fleet request. +// +// While the Spot fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot fleet, increase its target capacity. The Spot fleet +// launches the additional Spot instances according to the allocation strategy +// for the Spot fleet request. If the allocation strategy is lowestPrice, the +// Spot fleet launches instances using the Spot pool with the lowest price. +// If the allocation strategy is diversified, the Spot fleet distributes the +// instances across the Spot pools. +// +// To scale down your Spot fleet, decrease its target capacity. First, the +// Spot fleet cancels any open bids that exceed the new target capacity. You +// can request that the Spot fleet terminate Spot instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot fleet terminates the instances with the highest +// price per unit. If the allocation strategy is diversified, the Spot fleet +// terminates instances across the Spot pools. Alternatively, you can request +// that the Spot fleet keep the fleet at its current size, but not replace any +// Spot instances that are interrupted or that you terminate manually. +func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + err := req.Send() + return out, err +} + +const opModifySubnetAttribute = "ModifySubnetAttribute" + +// ModifySubnetAttributeRequest generates a request for the ModifySubnetAttribute operation. 
+func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { + op := &request.Operation{ + Name: opModifySubnetAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySubnetAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySubnetAttributeOutput{} + req.Data = output + return +} + +// Modifies a subnet attribute. +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVolumeAttribute = "ModifyVolumeAttribute" + +// ModifyVolumeAttributeRequest generates a request for the ModifyVolumeAttribute operation. +func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { + op := &request.Operation{ + Name: opModifyVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVolumeAttributeOutput{} + req.Data = output + return +} + +// Modifies a volume attribute. +// +// By default, all I/O operations for the volume are suspended when the data +// on the volume is determined to be potentially inconsistent, to prevent undetectable, +// latent data corruption. The I/O access to the volume can be resumed by first +// enabling I/O access and then checking the data consistency on your volume. +// +// You can change the default behavior to resume I/O operations. We recommend +// that you change this only for boot volumes or for volumes that are stateless +// or disposable. +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcAttribute = "ModifyVpcAttribute" + +// ModifyVpcAttributeRequest generates a request for the ModifyVpcAttribute operation. +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { + op := &request.Operation{ + Name: opModifyVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified VPC. +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcEndpoint = "ModifyVpcEndpoint" + +// ModifyVpcEndpointRequest generates a request for the ModifyVpcEndpoint operation. +func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcEndpointOutput{} + req.Data = output + return +} + +// Modifies attributes of a specified VPC endpoint. You can modify the policy +// associated with the endpoint, and you can add and remove route tables associated +// with the endpoint. 
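+//
+// A minimal usage sketch (editor's illustration; svc is an assumed *ec2.EC2
+// client and the endpoint and route table IDs are placeholders):
+//
+//    _, err := svc.ModifyVpcEndpoint(&ec2.ModifyVpcEndpointInput{
+//        VpcEndpointId:    aws.String("vpce-12345678"),
+//        AddRouteTableIds: []*string{aws.String("rtb-11111111")},
+//    })
+//    if err != nil {
+//        // handle the request error
+//    }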
+func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opMonitorInstances = "MonitorInstances" + +// MonitorInstancesRequest generates a request for the MonitorInstances operation. +func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) { + op := &request.Operation{ + Name: opMonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &MonitorInstancesOutput{} + req.Data = output + return +} + +// Enables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +const opMoveAddressToVpc = "MoveAddressToVpc" + +// MoveAddressToVpcRequest generates a request for the MoveAddressToVpc operation. +func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { + op := &request.Operation{ + Name: opMoveAddressToVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAddressToVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &MoveAddressToVpcOutput{} + req.Data = output + return +} + +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC +// platform. The Elastic IP address must be allocated to your account, and it +// must not be associated with an instance. After the Elastic IP address is +// moved, it is no longer available for use in the EC2-Classic platform, unless +// you move it back using the RestoreAddressToClassic request. You cannot move +// an Elastic IP address that's allocated for use in the EC2-VPC platform to +// the EC2-Classic platform. +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" + +// PurchaseReservedInstancesOfferingRequest generates a request for the PurchaseReservedInstancesOffering operation. +func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a Reserved Instance for use with your account. With Amazon EC2 +// Reserved Instances, you obtain a capacity reservation for a certain instance +// configuration over a specified period of time and pay a lower hourly rate +// compared to on-Demand Instance pricing. +// +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance +// offerings that match your specifications. 
After you've purchased a Reserved +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. +// +// For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstances = "RebootInstances" + +// RebootInstancesRequest generates a request for the RebootInstances operation. +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { + op := &request.Operation{ + Name: opRebootInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootInstancesOutput{} + req.Data = output + return +} + +// Requests a reboot of one or more instances. This operation is asynchronous; +// it only queues a request to reboot the specified instances. The operation +// succeeds if the instances are valid and belong to you. Requests to reboot +// terminated instances are ignored. +// +// If a Linux/Unix instance does not cleanly shut down within four minutes, +// Amazon EC2 performs a hard reboot. +// +// For more information about troubleshooting, see Getting Console Output and +// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterImage = "RegisterImage" + +// RegisterImageRequest generates a request for the RegisterImage operation. +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { + op := &request.Operation{ + Name: opRegisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterImageOutput{} + req.Data = output + return +} + +// Registers an AMI. When you're creating an AMI, this is the final step you +// must complete before you can launch an instance from the AMI. For more information +// about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For Amazon EBS-backed instances, CreateImage creates and registers the AMI +// in a single request, so you don't have to register the AMI yourself. +// +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI +// from a snapshot of a root device volume. For more information, see Launching +// an Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE +// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated +// with an AMI to verify subscription status for package updates. Creating an +// AMI from an EBS snapshot does not maintain this billing code, and subsequent +// instances launched from such an AMI will not be able to connect to package +// update infrastructure. +// +// Similarly, although you can create a Windows AMI from a snapshot, you can't +// successfully launch an instance from the AMI. +// +// To create Windows AMIs or to create AMIs for Linux operating systems that +// must retain AMI billing codes to work properly, see CreateImage. +// +// If needed, you can deregister an AMI at any time. Any modifications you +// make to an AMI backed by an instance store volume invalidates its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// You can't register an image where a secondary (non-root) snapshot has AWS +// Marketplace product codes. +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + err := req.Send() + return out, err +} + +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" + +// RejectVpcPeeringConnectionRequest generates a request for the RejectVpcPeeringConnection operation. +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opRejectVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &RejectVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Rejects a VPC peering connection request. The VPC peering connection must +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections +// request to view your outstanding VPC peering connection requests. To delete +// an active VPC peering connection, or to delete a VPC peering connection request +// that you initiated, use DeleteVpcPeeringConnection. +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opReleaseAddress = "ReleaseAddress" + +// ReleaseAddressRequest generates a request for the ReleaseAddress operation. +func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) { + op := &request.Operation{ + Name: opReleaseAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &ReleaseAddressOutput{} + req.Data = output + return +} + +// Releases the specified Elastic IP address. +// +// After releasing an Elastic IP address, it is released to the IP address +// pool and might be unavailable to you. Be sure to update your DNS records +// and any servers or devices that communicate with the address. If you attempt +// to release an Elastic IP address that you already released, you'll get an +// AuthFailure error if the address is already allocated to another AWS account. 
+// +// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically +// disassociates it from any instance that it's associated with. To disassociate +// an Elastic IP address without releasing it, use DisassociateAddress. +// +// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic +// IP address before you try to release it. Otherwise, Amazon EC2 returns an +// error (InvalidIPAddress.InUse). +func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" + +// ReplaceNetworkAclAssociationRequest generates a request for the ReplaceNetworkAclAssociation operation. +func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceNetworkAclAssociationOutput{} + req.Data = output + return +} + +// Changes which network ACL a subnet is associated with. By default when you +// create a subnet, it's automatically associated with the default network ACL. +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" + +// ReplaceNetworkAclEntryRequest generates a request for the ReplaceNetworkAclEntry operation. +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceNetworkAclEntryOutput{} + req.Data = output + return +} + +// Replaces an entry (rule) in a network ACL. For more information about network +// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRoute = "ReplaceRoute" + +// ReplaceRouteRequest generates a request for the ReplaceRoute operation. +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { + op := &request.Operation{ + Name: opReplaceRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteOutput{} + req.Data = output + return +} + +// Replaces an existing route within a route table in a VPC. 
You must provide +// only one of the following: Internet gateway or virtual private gateway, NAT +// instance, VPC peering connection, or network interface. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" + +// ReplaceRouteTableAssociationRequest generates a request for the ReplaceRouteTableAssociation operation. +func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { + op := &request.Operation{ + Name: opReplaceRouteTableAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteTableAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteTableAssociationOutput{} + req.Data = output + return +} + +// Changes the route table associated with a given subnet in a VPC. After the +// operation completes, the subnet uses the routes in the new route table it's +// associated with. For more information about route tables, see Route Tables +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can also use ReplaceRouteTableAssociation to change which table is the +// main route table in the VPC. You just specify the main route table's association +// ID and the route table to be the new main route table. +func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReportInstanceStatus = "ReportInstanceStatus" + +// ReportInstanceStatusRequest generates a request for the ReportInstanceStatus operation. +func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { + op := &request.Operation{ + Name: opReportInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &ReportInstanceStatusOutput{} + req.Data = output + return +} + +// Submits feedback about the status of an instance. The instance must be in +// the running state. If your experience with the instance differs from the +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus +// to report your experience with the instance. Amazon EC2 collects this information +// to improve the accuracy of status checks. +// +// Use of this action does not change the value returned by DescribeInstanceStatus. +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotFleet = "RequestSpotFleet" + +// RequestSpotFleetRequest generates a request for the RequestSpotFleet operation. 
+func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { + op := &request.Operation{ + Name: opRequestSpotFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotFleetInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotFleetOutput{} + req.Data = output + return +} + +// Creates a Spot fleet request. +// +// You can submit a single request that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// By default, the Spot fleet requests Spot instances in the Spot pool where +// the price per unit is the lowest. Each launch specification can include its +// own instance weighting that reflects the value of the instance type to your +// application workload. +// +// Alternatively, you can specify that the Spot fleet distribute the target +// capacity across the Spot pools included in its launch specifications. By +// ensuring that the Spot instances in your Spot fleet are in different Spot +// pools, you can improve the availability of your fleet. +// +// For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotInstances = "RequestSpotInstances" + +// RequestSpotInstancesRequest generates a request for the RequestSpotInstances operation. +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { + op := &request.Operation{ + Name: opRequestSpotInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotInstancesOutput{} + req.Data = output + return +} + +// Creates a Spot instance request. Spot instances are instances that Amazon +// EC2 launches when the bid price that you specify exceeds the current Spot +// price. Amazon EC2 periodically sets the Spot price based on available Spot +// Instance capacity and current Spot instance requests. For more information, +// see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + err := req.Send() + return out, err +} + +const opResetImageAttribute = "ResetImageAttribute" + +// ResetImageAttributeRequest generates a request for the ResetImageAttribute operation. +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { + op := &request.Operation{ + Name: opResetImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetImageAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an AMI to its default value. +// +// The productCodes attribute can't be reset. 
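+//
+// A minimal usage sketch (editor's illustration; svc is an assumed *ec2.EC2
+// client and the AMI ID is a placeholder), resetting the launch permissions
+// of an AMI:
+//
+//    _, err := svc.ResetImageAttribute(&ec2.ResetImageAttributeInput{
+//        ImageId:   aws.String("ami-12345678"),
+//        Attribute: aws.String("launchPermission"),
+//    })
+//    if err != nil {
+//        // handle the request error
+//    }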
+func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetInstanceAttribute = "ResetInstanceAttribute" + +// ResetInstanceAttributeRequest generates a request for the ResetInstanceAttribute operation. +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { + op := &request.Operation{ + Name: opResetInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetInstanceAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an instance to its default value. To reset the kernel +// or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, +// the instance can be either running or stopped. +// +// The SourceDestCheck attribute controls whether source/destination checking +// is enabled. The default value is true, which means checking is enabled. This +// value must be false for a NAT instance to perform NAT. For more information, +// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" + +// ResetNetworkInterfaceAttributeRequest generates a request for the ResetNetworkInterfaceAttribute operation. +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opResetNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Resets a network interface attribute. You can specify only one attribute +// at a time. +func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetSnapshotAttribute = "ResetSnapshotAttribute" + +// ResetSnapshotAttributeRequest generates a request for the ResetSnapshotAttribute operation. +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opResetSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetSnapshotAttributeOutput{} + req.Data = output + return +} + +// Resets permission settings for the specified snapshot. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. 
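+//
+// A minimal usage sketch (editor's illustration; svc is an assumed *ec2.EC2
+// client and the snapshot ID is a placeholder), resetting the create-volume
+// permissions of a snapshot:
+//
+//    _, err := svc.ResetSnapshotAttribute(&ec2.ResetSnapshotAttributeInput{
+//        SnapshotId: aws.String("snap-1234abcd"),
+//        Attribute:  aws.String("createVolumePermission"),
+//    })
+//    if err != nil {
+//        // handle the request error
+//    }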
+func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreAddressToClassic = "RestoreAddressToClassic" + +// RestoreAddressToClassicRequest generates a request for the RestoreAddressToClassic operation. +func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { + op := &request.Operation{ + Name: opRestoreAddressToClassic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreAddressToClassicInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreAddressToClassicOutput{} + req.Data = output + return +} + +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform +// back to the EC2-Classic platform. You cannot move an Elastic IP address that +// was originally allocated for use in EC2-VPC. The Elastic IP address must +// not be associated with an instance or network interface. +func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" + +// RevokeSecurityGroupEgressRequest generates a request for the RevokeSecurityGroupEgress operation. +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// Removes one or more egress rules from a security group for EC2-VPC. The values +// that you specify in the revoke request (for example, ports) must match the +// existing rule's values for the rule to be revoked. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" + +// RevokeSecurityGroupIngressRequest generates a request for the RevokeSecurityGroupIngress operation. +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Removes one or more ingress rules from a security group. 
The values that +// you specify in the revoke request (for example, ports) must match the existing +// rule's values for the rule to be removed. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRunInstances = "RunInstances" + +// RunInstancesRequest generates a request for the RunInstances operation. +func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { + op := &request.Operation{ + Name: opRunInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &Reservation{} + req.Data = output + return +} + +// Launches the specified number of instances using an AMI for which you have +// permissions. +// +// When you launch an instance, it enters the pending state. After the instance +// is ready for you, it enters the running state. To check the state of your +// instance, call DescribeInstances. +// +// If you don't specify a security group when launching an instance, Amazon +// EC2 uses the default security group. For more information, see Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-VPC only accounts] If you don't specify a subnet in the request, we +// choose a default subnet from your default VPC for you. +// +// [EC2-Classic accounts] If you're launching into EC2-Classic and you don't +// specify an Availability Zone, we choose one for you. +// +// Linux instances have access to the public key of the key pair at boot. You +// can use this key to provide secure access to the instance. Amazon EC2 public +// images use this feature to provide secure access without passwords. For more +// information, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can provide optional user data when launching an instance. For more +// information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If any of the AMIs have a product code attached for which the user has not +// subscribed, RunInstances fails. +// +// T2 instance types can only be launched into a VPC. If you do not have a +// default VPC, or if you do not specify a subnet ID in the request, RunInstances +// fails. +// +// For more information about troubleshooting, see What To Do If An Instance +// Immediately Terminates (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), +// and Troubleshooting Connecting to Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// in the Amazon Elastic Compute Cloud User Guide. 
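+//
+// A minimal usage sketch (editor's illustration; svc is an assumed *ec2.EC2
+// client, the AMI ID is a placeholder, and aws.String/aws.Int64 are the SDK's
+// pointer helpers), launching a single instance:
+//
+//    reservation, err := svc.RunInstances(&ec2.RunInstancesInput{
+//        ImageId:      aws.String("ami-12345678"),
+//        InstanceType: aws.String("t2.micro"),
+//        MinCount:     aws.Int64(1),
+//        MaxCount:     aws.Int64(1),
+//    })
+//    if err != nil {
+//        // handle the request error
+//    }
+//    fmt.Println(reservation.Instances) // instances start in the pending state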
+func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStartInstances = "StartInstances" + +// StartInstancesRequest generates a request for the StartInstances operation. +func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) { + op := &request.Operation{ + Name: opStartInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StartInstancesOutput{} + req.Data = output + return +} + +// Starts an Amazon EBS-backed AMI that you've previously stopped. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for hourly instance usage. However, your +// root partition Amazon EBS volume remains, continues to persist your data, +// and you are charged for Amazon EBS volume usage. You can restart your instance +// at any time. Each time you transition an instance from stopped to started, +// Amazon EC2 charges a full instance hour, even if transitions happen multiple +// times within a single hour. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// For more information, see Stopping Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopInstances = "StopInstances" + +// StopInstancesRequest generates a request for the StopInstances operation. +func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) { + op := &request.Operation{ + Name: opStopInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StopInstancesOutput{} + req.Data = output + return +} + +// Stops an Amazon EBS-backed instance. Each time you transition an instance +// from stopped to started, Amazon EC2 charges a full instance hour, even if +// transitions happen multiple times within a single hour. +// +// You can't start or stop Spot Instances. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for hourly instance usage. However, your +// root partition Amazon EBS volume remains, continues to persist your data, +// and you are charged for Amazon EBS volume usage. You can restart your instance +// at any time. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. 
What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, the root device and any other devices attached during the instance +// launch are automatically deleted. For more information about the differences +// between stopping and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Stopping +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstances = "TerminateInstances" + +// TerminateInstancesRequest generates a request for the TerminateInstances operation. +func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { + op := &request.Operation{ + Name: opTerminateInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstancesOutput{} + req.Data = output + return +} + +// Shuts down one or more instances. This operation is idempotent; if you terminate +// an instance more than once, each call succeeds. +// +// Terminated instances remain visible after termination (for approximately +// one hour). +// +// By default, Amazon EC2 deletes all EBS volumes that were attached when the +// instance launched. Volumes attached after instance launch continue running. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, any attached EBS volumes with the DeleteOnTermination block +// device mapping parameter set to true are automatically deleted. For more +// information about the differences between stopping and terminating instances, +// see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Terminating +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + err := req.Send() + return out, err +} + +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" + +// UnassignPrivateIpAddressesRequest generates a request for the UnassignPrivateIpAddresses operation. 
+func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opUnassignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnassignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Unassigns one or more secondary private IP addresses from a network interface. +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opUnmonitorInstances = "UnmonitorInstances" + +// UnmonitorInstancesRequest generates a request for the UnmonitorInstances operation. +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { + op := &request.Operation{ + Name: opUnmonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnmonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnmonitorInstancesOutput{} + req.Data = output + return +} + +// Disables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +type AcceptVpcPeeringConnectionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` + + metadataAcceptVpcPeeringConnectionInput `json:"-" xml:"-"` +} + +type metadataAcceptVpcPeeringConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type AcceptVpcPeeringConnectionOutput struct { + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` + + metadataAcceptVpcPeeringConnectionOutput `json:"-" xml:"-"` +} + +type metadataAcceptVpcPeeringConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Describes an account attribute. +type AccountAttribute struct { + // The name of the account attribute. 
+ AttributeName *string `locationName:"attributeName" type:"string"` + + // One or more values for the account attribute. + AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"` + + metadataAccountAttribute `json:"-" xml:"-"` +} + +type metadataAccountAttribute struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AccountAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttribute) GoString() string { + return s.String() +} + +// Describes a value of an account attribute. +type AccountAttributeValue struct { + // The value of the attribute. + AttributeValue *string `locationName:"attributeValue" type:"string"` + + metadataAccountAttributeValue `json:"-" xml:"-"` +} + +type metadataAccountAttributeValue struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AccountAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttributeValue) GoString() string { + return s.String() +} + +// Describes a running instance in a Spot fleet. +type ActiveInstance struct { + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + metadataActiveInstance `json:"-" xml:"-"` +} + +type metadataActiveInstance struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ActiveInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveInstance) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type Address struct { + // The ID representing the allocation of the address for use with EC2-VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID representing the association of the address with an instance in a + // VPC. + AssociationId *string `locationName:"associationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The ID of the instance that the address is associated with (if any). + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that owns the network interface. + NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. 
+ PublicIp *string `locationName:"publicIp" type:"string"` + + metadataAddress `json:"-" xml:"-"` +} + +type metadataAddress struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Address) GoString() string { + return s.String() +} + +type AllocateAddressInput struct { + // Set to vpc to allocate the address for use with instances in a VPC. + // + // Default: The address is for use with instances in EC2-Classic. + Domain *string `type:"string" enum:"DomainType"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataAllocateAddressInput `json:"-" xml:"-"` +} + +type metadataAllocateAddressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AllocateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressInput) GoString() string { + return s.String() +} + +type AllocateAddressOutput struct { + // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic + // IP address for use with instances in a VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + metadataAllocateAddressOutput `json:"-" xml:"-"` +} + +type metadataAllocateAddressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AllocateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressOutput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesInput struct { + // Indicates whether to allow an IP address that is already assigned to another + // network interface or instance to be reassigned to the specified network interface. + AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // One or more IP addresses to be assigned as a secondary private IP address + // to the network interface. You can't specify this parameter when also specifying + // a number of secondary IP addresses. + // + // If you don't specify an IP address, Amazon EC2 automatically selects an + // IP address within the subnet range. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"` + + // The number of secondary IP addresses to assign to the network interface. + // You can't specify this parameter when also specifying private IP addresses. 
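+	//
+	// A minimal sketch, assuming an existing *ec2.EC2 client named svc (the interface
+	// ID and address below are placeholders): specify either explicit addresses or a
+	// count of addresses for EC2 to pick, but not both.
+	//
+	//	// Explicit secondary addresses:
+	//	_, err := svc.AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
+	//		NetworkInterfaceId: aws.String("eni-12345678"),
+	//		PrivateIpAddresses: []*string{aws.String("10.0.0.10")},
+	//	})
+	//
+	//	// Or let Amazon EC2 pick addresses from the subnet range:
+	//	_, err = svc.AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
+	//		NetworkInterfaceId:             aws.String("eni-12345678"),
+	//		SecondaryPrivateIpAddressCount: aws.Int64(2),
+	//	})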
+ SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + metadataAssignPrivateIpAddressesInput `json:"-" xml:"-"` +} + +type metadataAssignPrivateIpAddressesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesOutput struct { + metadataAssignPrivateIpAddressesOutput `json:"-" xml:"-"` +} + +type metadataAssignPrivateIpAddressesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type AssociateAddressInput struct { + // [EC2-VPC] The allocation ID. This is required for EC2-VPC. + AllocationId *string `type:"string"` + + // [EC2-VPC] Allows an Elastic IP address that is already associated with an + // instance or network interface to be re-associated with the specified instance + // or network interface. Otherwise, the operation fails. + // + // Default: false + AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you + // can specify either the instance ID or the network interface ID, but not both. + // The operation fails if you specify an instance ID unless exactly one network + // interface is attached. + InstanceId *string `type:"string"` + + // [EC2-VPC] The ID of the network interface. If the instance has more than + // one network interface, you must specify a network interface ID. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // [EC2-VPC] The primary or secondary private IP address to associate with the + // Elastic IP address. If no private IP address is specified, the Elastic IP + // address is associated with the primary private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. This is required for EC2-Classic. + PublicIp *string `type:"string"` + + metadataAssociateAddressInput `json:"-" xml:"-"` +} + +type metadataAssociateAddressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressInput) GoString() string { + return s.String() +} + +type AssociateAddressOutput struct { + // [EC2-VPC] The ID that represents the association of the Elastic IP address + // with an instance. 
+ AssociationId *string `locationName:"associationId" type:"string"` + + metadataAssociateAddressOutput `json:"-" xml:"-"` +} + +type metadataAssociateAddressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressOutput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsInput struct { + // The ID of the DHCP options set, or default to associate no DHCP options with + // the VPC. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + metadataAssociateDhcpOptionsInput `json:"-" xml:"-"` +} + +type metadataAssociateDhcpOptionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsInput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsOutput struct { + metadataAssociateDhcpOptionsOutput `json:"-" xml:"-"` +} + +type metadataAssociateDhcpOptionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type AssociateRouteTableInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + metadataAssociateRouteTableInput `json:"-" xml:"-"` +} + +type metadataAssociateRouteTableInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableInput) GoString() string { + return s.String() +} + +type AssociateRouteTableOutput struct { + // The route table association ID (needed to disassociate the route table). 
+ AssociationId *string `locationName:"associationId" type:"string"` + + metadataAssociateRouteTableOutput `json:"-" xml:"-"` +} + +type metadataAssociateRouteTableOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AssociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableOutput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of one or more of the VPC's security groups. You cannot specify security + // groups from a different VPC. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"` + + // The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of a ClassicLink-enabled VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataAttachClassicLinkVpcInput `json:"-" xml:"-"` +} + +type metadataAttachClassicLinkVpcInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataAttachClassicLinkVpcOutput `json:"-" xml:"-"` +} + +type metadataAttachClassicLinkVpcOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type AttachInternetGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataAttachInternetGatewayInput `json:"-" xml:"-"` +} + +type metadataAttachInternetGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayInput) GoString() string { + return s.String() +} + +type AttachInternetGatewayOutput struct { + metadataAttachInternetGatewayOutput `json:"-" xml:"-"` +} + +type metadataAttachInternetGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayOutput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceInput struct { + // The index of the device for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + metadataAttachNetworkInterfaceInput `json:"-" xml:"-"` +} + +type metadataAttachNetworkInterfaceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceOutput struct { + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + metadataAttachNetworkInterfaceOutput `json:"-" xml:"-"` +} + +type metadataAttachNetworkInterfaceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type AttachVolumeInput struct { + // The device name to expose to the instance (for example, /dev/sdh or xvdh). + Device *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The ID of the EBS volume. The volume and instance must be within the same + // Availability Zone. 
+ VolumeId *string `type:"string" required:"true"` + + metadataAttachVolumeInput `json:"-" xml:"-"` +} + +type metadataAttachVolumeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVolumeInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` + + metadataAttachVpnGatewayInput `json:"-" xml:"-"` +} + +type metadataAttachVpnGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayOutput struct { + // Information about the attachment. + VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"` + + metadataAttachVpnGatewayOutput `json:"-" xml:"-"` +} + +type metadataAttachVpnGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayOutput) GoString() string { + return s.String() +} + +// The value to use when a resource attribute accepts a Boolean value. +type AttributeBooleanValue struct { + // Valid values are true or false. + Value *bool `locationName:"value" type:"boolean"` + + metadataAttributeBooleanValue `json:"-" xml:"-"` +} + +type metadataAttributeBooleanValue struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttributeBooleanValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeBooleanValue) GoString() string { + return s.String() +} + +// The value to use for a resource attribute. +type AttributeValue struct { + // Valid values are case-sensitive and vary by action. + Value *string `locationName:"value" type:"string"` + + metadataAttributeValue `json:"-" xml:"-"` +} + +type metadataAttributeValue struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupEgressInput struct { + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To authorize outbound access to + // a destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. To authorize outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `locationName:"toPort" type:"integer"` + + metadataAuthorizeSecurityGroupEgressInput `json:"-" xml:"-"` +} + +type metadataAuthorizeSecurityGroupEgressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupEgressOutput struct { + metadataAuthorizeSecurityGroupEgressOutput `json:"-" xml:"-"` +} + +type metadataAuthorizeSecurityGroupEgressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressInput struct { + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. 
Can be used to specify multiple rules in a single + // command. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // (VPC only) Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic, default VPC] The AWS account number for the source security + // group. For EC2-VPC, the source security group must be in the same VPC. You + // can't specify this parameter in combination with the following parameters: + // the CIDR IP address range, the IP protocol, the start of the port range, + // and the end of the port range. Creates rules that grant full ICMP, UDP, and + // TCP access. To create a rule with a specific IP protocol and port range, + // use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` + + metadataAuthorizeSecurityGroupIngressInput `json:"-" xml:"-"` +} + +type metadataAuthorizeSecurityGroupIngressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressOutput struct { + metadataAuthorizeSecurityGroupIngressOutput `json:"-" xml:"-"` +} + +type metadataAuthorizeSecurityGroupIngressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone. +type AvailabilityZone struct { + // Any messages about the Availability Zone. + Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` + + // The state of the Availability Zone. + State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` + + // The name of the Availability Zone. + ZoneName *string `locationName:"zoneName" type:"string"` + + metadataAvailabilityZone `json:"-" xml:"-"` +} + +type metadataAvailabilityZone struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Describes a message about an Availability Zone. +type AvailabilityZoneMessage struct { + // The message about the Availability Zone. 
+ Message *string `locationName:"message" type:"string"` + + metadataAvailabilityZoneMessage `json:"-" xml:"-"` +} + +type metadataAvailabilityZoneMessage struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s AvailabilityZoneMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZoneMessage) GoString() string { + return s.String() +} + +type BlobAttributeValue struct { + Value []byte `locationName:"value" type:"blob"` + + metadataBlobAttributeValue `json:"-" xml:"-"` +} + +type metadataBlobAttributeValue struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BlobAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlobAttributeValue) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1.The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. + VirtualName *string `locationName:"virtualName" type:"string"` + + metadataBlockDeviceMapping `json:"-" xml:"-"` +} + +type metadataBlockDeviceMapping struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +type BundleInstanceInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to bundle. + // + // Type: String + // + // Default: None + // + // Required: Yes + InstanceId *string `type:"string" required:"true"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. 
+ Storage *Storage `type:"structure" required:"true"` + + metadataBundleInstanceInput `json:"-" xml:"-"` +} + +type metadataBundleInstanceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BundleInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceInput) GoString() string { + return s.String() +} + +type BundleInstanceOutput struct { + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` + + metadataBundleInstanceOutput `json:"-" xml:"-"` +} + +type metadataBundleInstanceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BundleInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceOutput) GoString() string { + return s.String() +} + +// Describes a bundle task. +type BundleTask struct { + // The ID of the bundle task. + BundleId *string `locationName:"bundleId" type:"string"` + + // If the task fails, a description of the error. + BundleTaskError *BundleTaskError `locationName:"error" type:"structure"` + + // The ID of the instance associated with this bundle task. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The level of task completion, as a percent (for example, 20%). + Progress *string `locationName:"progress" type:"string"` + + // The time this task started. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the task. + State *string `locationName:"state" type:"string" enum:"BundleTaskState"` + + // The Amazon S3 storage locations. + Storage *Storage `locationName:"storage" type:"structure"` + + // The time of the most recent update for the task. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` + + metadataBundleTask `json:"-" xml:"-"` +} + +type metadataBundleTask struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BundleTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTask) GoString() string { + return s.String() +} + +// Describes an error for BundleInstance. +type BundleTaskError struct { + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` + + metadataBundleTaskError `json:"-" xml:"-"` +} + +type metadataBundleTaskError struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s BundleTaskError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTaskError) GoString() string { + return s.String() +} + +type CancelBundleTaskInput struct { + // The ID of the bundle task. + BundleId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataCancelBundleTaskInput `json:"-" xml:"-"` +} + +type metadataCancelBundleTaskInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskInput) GoString() string { + return s.String() +} + +type CancelBundleTaskOutput struct { + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` + + metadataCancelBundleTaskOutput `json:"-" xml:"-"` +} + +type metadataCancelBundleTaskOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskOutput) GoString() string { + return s.String() +} + +type CancelConversionTaskInput struct { + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The reason for canceling the conversion task. + ReasonMessage *string `locationName:"reasonMessage" type:"string"` + + metadataCancelConversionTaskInput `json:"-" xml:"-"` +} + +type metadataCancelConversionTaskInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelConversionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskInput) GoString() string { + return s.String() +} + +type CancelConversionTaskOutput struct { + metadataCancelConversionTaskOutput `json:"-" xml:"-"` +} + +type metadataCancelConversionTaskOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelConversionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskOutput) GoString() string { + return s.String() +} + +type CancelExportTaskInput struct { + // The ID of the export task. This is the ID returned by CreateInstanceExportTask. 
+ ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"` + + metadataCancelExportTaskInput `json:"-" xml:"-"` +} + +type metadataCancelExportTaskInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +type CancelExportTaskOutput struct { + metadataCancelExportTaskOutput `json:"-" xml:"-"` +} + +type metadataCancelExportTaskOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CancelImportTaskInput struct { + // The reason for canceling the task. + CancelReason *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the import image or import snapshot task to be canceled. + ImportTaskId *string `type:"string"` + + metadataCancelImportTaskInput `json:"-" xml:"-"` +} + +type metadataCancelImportTaskInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelImportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskInput) GoString() string { + return s.String() +} + +type CancelImportTaskOutput struct { + // The ID of the task being canceled. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The current state of the task being canceled. + PreviousState *string `locationName:"previousState" type:"string"` + + // The current state of the task being canceled. + State *string `locationName:"state" type:"string"` + + metadataCancelImportTaskOutput `json:"-" xml:"-"` +} + +type metadataCancelImportTaskOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelImportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskOutput) GoString() string { + return s.String() +} + +type CancelReservedInstancesListingInput struct { + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` + + metadataCancelReservedInstancesListingInput `json:"-" xml:"-"` +} + +type metadataCancelReservedInstancesListingInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CancelReservedInstancesListingOutput struct { + // The Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` + + metadataCancelReservedInstancesListingOutput `json:"-" xml:"-"` +} + +type metadataCancelReservedInstancesListingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet error. +type CancelSpotFleetRequestsError struct { + // The error code. + Code *string `locationName:"code" type:"string" required:"true" enum:"CancelBatchErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string" required:"true"` + + metadataCancelSpotFleetRequestsError `json:"-" xml:"-"` +} + +type metadataCancelSpotFleetRequestsError struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsError) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was not successfully canceled. +type CancelSpotFleetRequestsErrorItem struct { + // The error. + Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + metadataCancelSpotFleetRequestsErrorItem `json:"-" xml:"-"` +} + +type metadataCancelSpotFleetRequestsErrorItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsErrorItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotFleetRequests. +type CancelSpotFleetRequestsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the Spot fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to terminate instances for a Spot fleet request if it is + // canceled successfully. + TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"` + + metadataCancelSpotFleetRequestsInput `json:"-" xml:"-"` +} + +type metadataCancelSpotFleetRequestsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotFleetRequests. +type CancelSpotFleetRequestsOutput struct { + // Information about the Spot fleet requests that are successfully canceled. 
+ SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"` + + // Information about the Spot fleet requests that are not successfully canceled. + UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"` + + metadataCancelSpotFleetRequestsOutput `json:"-" xml:"-"` +} + +type metadataCancelSpotFleetRequestsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was successfully canceled. +type CancelSpotFleetRequestsSuccessItem struct { + // The current state of the Spot fleet request. + CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The previous state of the Spot fleet request. + PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + metadataCancelSpotFleetRequestsSuccessItem `json:"-" xml:"-"` +} + +type metadataCancelSpotFleetRequestsSuccessItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` + + metadataCancelSpotInstanceRequestsInput `json:"-" xml:"-"` +} + +type metadataCancelSpotInstanceRequestsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsOutput struct { + // One or more Spot instance requests. 
+ CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` + + metadataCancelSpotInstanceRequestsOutput `json:"-" xml:"-"` +} + +type metadataCancelSpotInstanceRequestsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Describes a request to cancel a Spot instance. +type CancelledSpotInstanceRequest struct { + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The state of the Spot instance request. + State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"` + + metadataCancelledSpotInstanceRequest `json:"-" xml:"-"` +} + +type metadataCancelledSpotInstanceRequest struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CancelledSpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelledSpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes a linked EC2-Classic instance. +type ClassicLinkInstance struct { + // A list of security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataClassicLinkInstance `json:"-" xml:"-"` +} + +type metadataClassicLinkInstance struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ClassicLinkInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkInstance) GoString() string { + return s.String() +} + +// Describes the client-specific data. +type ClientData struct { + // A user-defined comment about the disk upload. + Comment *string `type:"string"` + + // The time that the disk upload ends. + UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `type:"double"` + + // The time that the disk upload starts. + UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + metadataClientData `json:"-" xml:"-"` +} + +type metadataClientData struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ClientData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientData) GoString() string { + return s.String() +} + +type ConfirmProductInstanceInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ InstanceId *string `type:"string" required:"true"` + + // The product code. This must be a product code that you own. + ProductCode *string `type:"string" required:"true"` + + metadataConfirmProductInstanceInput `json:"-" xml:"-"` +} + +type metadataConfirmProductInstanceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ConfirmProductInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceInput) GoString() string { + return s.String() +} + +type ConfirmProductInstanceOutput struct { + // The AWS account ID of the instance owner. This is only present if the product + // code is attached to the instance. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The return value of the request. Returns true if the specified product code + // is owned by the requester and associated with the specified instance. + Return *bool `locationName:"return" type:"boolean"` + + metadataConfirmProductInstanceOutput `json:"-" xml:"-"` +} + +type metadataConfirmProductInstanceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ConfirmProductInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceOutput) GoString() string { + return s.String() +} + +// Describes a conversion task. +type ConversionTask struct { + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // The time when the task expires. If the upload isn't complete before the expiration + // time, we automatically cancel the task. + ExpirationTime *string `locationName:"expirationTime" type:"string"` + + // If the task is for importing an instance, this contains information about + // the import instance task. + ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"` + + // If the task is for importing a volume, this contains information about the + // import volume task. + ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` + + // The state of the conversion task. + State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"` + + // The status message related to the conversion task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + metadataConversionTask `json:"-" xml:"-"` +} + +type metadataConversionTask struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ConversionTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConversionTask) GoString() string { + return s.String() +} + +type CopyImageInput struct { + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `type:"string"` + + // A description for the new AMI in the destination region. 
+ Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the new AMI in the destination region. + Name *string `type:"string" required:"true"` + + // The ID of the AMI to copy. + SourceImageId *string `type:"string" required:"true"` + + // The name of the region that contains the AMI to copy. + SourceRegion *string `type:"string" required:"true"` + + metadataCopyImageInput `json:"-" xml:"-"` +} + +type metadataCopyImageInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +type CopyImageOutput struct { + // The ID of the new AMI. + ImageId *string `locationName:"imageId" type:"string"` + + metadataCopyImageOutput `json:"-" xml:"-"` +} + +type metadataCopyImageOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +type CopySnapshotInput struct { + // A description for the EBS snapshot. + Description *string `type:"string"` + + // The destination region to use in the PresignedUrl parameter of a snapshot + // copy operation. This parameter is only valid for specifying the destination + // region in a PresignedUrl parameter, where it is required. + // + // CopySnapshot sends the snapshot copy to the regional endpoint that you + // send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS + // CLI, this is specified with the --region parameter or the default region + // in your AWS configuration file). + DestinationRegion *string `locationName:"destinationRegion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshot should be encrypted. There is + // no way to create an unencrypted snapshot copy from an encrypted snapshot; + // however, you can encrypt a copy of an unencrypted snapshot with this flag. + // The default CMK for EBS is used unless a non-default AWS Key Management Service + // (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon + // EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // creating the snapshot copy. This parameter is only required if you want to + // use a non-default CMK; if this parameter is not specified, the default CMK + // for EBS is used. 
The ARN contains the arn:aws:kms namespace, followed by + // the region of the CMK, the AWS account ID of the CMK owner, the key namespace, + // and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The pre-signed URL that facilitates copying an encrypted snapshot. This parameter + // is only required when copying an encrypted snapshot with the Amazon EC2 Query + // API; it is available as an optional parameter in all other cases. The PresignedUrl + // should use the snapshot source endpoint, the CopySnapshot action, and include + // the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The + // PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots + // are stored in Amazon S3, the signing algorithm for this parameter uses the + // same logic that is described in Authenticating Requests by Using Query Parameters + // (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // in the Amazon Simple Storage Service API Reference. An invalid or improperly + // signed PresignedUrl will cause the copy operation to fail asynchronously, + // and the snapshot will move to an error state. + PresignedUrl *string `locationName:"presignedUrl" type:"string"` + + // The ID of the region that contains the snapshot to be copied. + SourceRegion *string `type:"string" required:"true"` + + // The ID of the EBS snapshot to copy. + SourceSnapshotId *string `type:"string" required:"true"` + + metadataCopySnapshotInput `json:"-" xml:"-"` +} + +type metadataCopySnapshotInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +type CopySnapshotOutput struct { + // The ID of the new snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + metadataCopySnapshotOutput `json:"-" xml:"-"` +} + +type metadataCopySnapshotOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayInput struct { + // For devices that support BGP, the customer gateway's BGP ASN. + // + // Default: 65000 + BgpAsn *int64 `type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Internet-routable IP address for the customer gateway's outside interface. + // The address must be static. + PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + + // The type of VPN connection that this customer gateway supports (ipsec.1). 
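+	//
+	// As an illustrative sketch (the ASN and IP address below are placeholders),
+	// a request for a BGP-capable device could look like:
+	//
+	//	asn := int64(65000)
+	//	ip := "203.0.113.12"
+	//	vpnType := "ipsec.1"
+	//	input := CreateCustomerGatewayInput{
+	//		BgpAsn:   &asn,
+	//		PublicIp: &ip,
+	//		Type:     &vpnType,
+	//	}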
+ Type *string `type:"string" required:"true" enum:"GatewayType"` + + metadataCreateCustomerGatewayInput `json:"-" xml:"-"` +} + +type metadataCreateCustomerGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayInput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayOutput struct { + // Information about the customer gateway. + CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"` + + metadataCreateCustomerGatewayOutput `json:"-" xml:"-"` +} + +type metadataCreateCustomerGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayOutput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsInput struct { + // A DHCP configuration option. + DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataCreateDhcpOptionsInput `json:"-" xml:"-"` +} + +type metadataCreateDhcpOptionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsInput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsOutput struct { + // A set of DHCP options. + DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"` + + metadataCreateDhcpOptionsOutput `json:"-" xml:"-"` +} + +type metadataCreateDhcpOptionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type CreateFlowLogsInput struct { + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs + // log group. + DeliverLogsPermissionArn *string `type:"string" required:"true"` + + // The name of the CloudWatch log group. + LogGroupName *string `type:"string" required:"true"` + + // One or more subnet, network interface, or VPC IDs. + ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` + + // The type of resource on which to create the flow log. + ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + + // The type of traffic to log. 
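+	// For example, "ACCEPT" logs only accepted traffic, "REJECT" logs only
+	// rejected traffic, and "ALL" logs both; see the TrafficType enum for the
+	// authoritative list of values.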
+ TrafficType *string `type:"string" required:"true" enum:"TrafficType"` + + metadataCreateFlowLogsInput `json:"-" xml:"-"` +} + +type metadataCreateFlowLogsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsInput) GoString() string { + return s.String() +} + +type CreateFlowLogsOutput struct { + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the flow logs. + FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"` + + // Information about the flow logs that could not be created successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` + + metadataCreateFlowLogsOutput `json:"-" xml:"-"` +} + +type metadataCreateFlowLogsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsOutput) GoString() string { + return s.String() +} + +type CreateImageInput struct { + // Information about one or more block device mappings. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for the new image. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // A name for the new image. + // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + Name *string `locationName:"name" type:"string" required:"true"` + + // By default, this parameter is set to false, which means Amazon EC2 attempts + // to shut down the instance cleanly before image creation and then reboots + // the instance. When the parameter is set to true, Amazon EC2 doesn't shut + // down the instance before creating the image. When this option is used, file + // system integrity on the created image can't be guaranteed. + NoReboot *bool `locationName:"noReboot" type:"boolean"` + + metadataCreateImageInput `json:"-" xml:"-"` +} + +type metadataCreateImageInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageInput) GoString() string { + return s.String() +} + +type CreateImageOutput struct { + // The ID of the new AMI. 
+ ImageId *string `locationName:"imageId" type:"string"` + + metadataCreateImageOutput `json:"-" xml:"-"` +} + +type metadataCreateImageOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageOutput) GoString() string { + return s.String() +} + +type CreateInstanceExportTaskInput struct { + // A description for the conversion task or the resource being exported. The + // maximum length is 255 bytes. + Description *string `locationName:"description" type:"string"` + + // The format and location for an instance export task. + ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` + + metadataCreateInstanceExportTaskInput `json:"-" xml:"-"` +} + +type metadataCreateInstanceExportTaskInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskInput) GoString() string { + return s.String() +} + +type CreateInstanceExportTaskOutput struct { + // Information about the instance export task. + ExportTask *ExportTask `locationName:"exportTask" type:"structure"` + + metadataCreateInstanceExportTaskOutput `json:"-" xml:"-"` +} + +type metadataCreateInstanceExportTaskOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskOutput) GoString() string { + return s.String() +} + +type CreateInternetGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataCreateInternetGatewayInput `json:"-" xml:"-"` +} + +type metadataCreateInternetGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayInput) GoString() string { + return s.String() +} + +type CreateInternetGatewayOutput struct { + // Information about the Internet gateway. 
+ InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` + + metadataCreateInternetGatewayOutput `json:"-" xml:"-"` +} + +type metadataCreateInternetGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayOutput) GoString() string { + return s.String() +} + +type CreateKeyPairInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // Constraints: Up to 255 ASCII characters + KeyName *string `type:"string" required:"true"` + + metadataCreateKeyPairInput `json:"-" xml:"-"` +} + +type metadataCreateKeyPairInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairInput) GoString() string { + return s.String() +} + +// Describes a key pair. +type CreateKeyPairOutput struct { + // The SHA-1 digest of the DER encoded private key. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // An unencrypted PEM encoded RSA private key. + KeyMaterial *string `locationName:"keyMaterial" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + metadataCreateKeyPairOutput `json:"-" xml:"-"` +} + +type metadataCreateKeyPairOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryInput struct { + // The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether this is an egress rule (rule is applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying ICMP for the + // protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. 
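+	// Valid values are "allow" and "deny".
+	//
+	// As an illustrative sketch (the ACL ID and rule number are placeholders),
+	// an ingress rule that allows all TCP traffic could look like:
+	//
+	//	aclID := "acl-1a2b3c4d"
+	//	ruleNum := int64(100)
+	//	action := "allow"
+	//	proto := "6" // TCP
+	//	cidr := "0.0.0.0/0"
+	//	egress := false
+	//	entry := CreateNetworkAclEntryInput{
+	//		CidrBlock:    &cidr,
+	//		Egress:       &egress,
+	//		NetworkAclId: &aclID,
+	//		Protocol:     &proto,
+	//		RuleAction:   &action,
+	//		RuleNumber:   &ruleNum,
+	//	}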
+ RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number for the entry (for example, 100). ACL entries are processed + // in ascending order by rule number. + // + // Constraints: Positive integer from 1 to 32766 + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` + + metadataCreateNetworkAclEntryInput `json:"-" xml:"-"` +} + +type metadataCreateNetworkAclEntryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryInput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryOutput struct { + metadataCreateNetworkAclEntryOutput `json:"-" xml:"-"` +} + +type metadataCreateNetworkAclEntryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataCreateNetworkAclInput `json:"-" xml:"-"` +} + +type metadataCreateNetworkAclInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclInput) GoString() string { + return s.String() +} + +type CreateNetworkAclOutput struct { + // Information about the network ACL. + NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"` + + metadataCreateNetworkAclOutput `json:"-" xml:"-"` +} + +type metadataCreateNetworkAclOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclOutput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceInput struct { + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The primary private IP address of the network interface. If you don't specify + // an IP address, Amazon EC2 selects one for you from the subnet range. 
If you + // specify an IP address, you cannot indicate any IP addresses specified in + // privateIpAddresses as primary (only one IP address can be designated as primary). + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses to assign to a network interface. + // When you specify a number of secondary IP addresses, Amazon EC2 selects these + // IP addresses within the subnet range. You can't specify this option and specify + // more than one private IP address using privateIpAddresses. + // + // The number of IP addresses you can assign to a network interface varies + // by instance type. For more information, see Private IP Addresses Per ENI + // Per Instance Type (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) + // in the Amazon Elastic Compute Cloud User Guide. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet to associate with the network interface. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + metadataCreateNetworkInterfaceInput `json:"-" xml:"-"` +} + +type metadataCreateNetworkInterfaceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceInput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceOutput struct { + // Information about the network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` + + metadataCreateNetworkInterfaceOutput `json:"-" xml:"-"` +} + +type metadataCreateNetworkInterfaceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type CreatePlacementGroupInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A name for the placement group. + // + // Constraints: Up to 255 ASCII characters + GroupName *string `locationName:"groupName" type:"string" required:"true"` + + // The placement strategy. 
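+	// For example, "cluster" places instances close together inside a single
+	// Availability Zone for low-latency, high-throughput networking; see the
+	// PlacementStrategy enum for the authoritative list of values.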
+ Strategy *string `locationName:"strategy" type:"string" required:"true" enum:"PlacementStrategy"` + + metadataCreatePlacementGroupInput `json:"-" xml:"-"` +} + +type metadataCreatePlacementGroupInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupInput) GoString() string { + return s.String() +} + +type CreatePlacementGroupOutput struct { + metadataCreatePlacementGroupOutput `json:"-" xml:"-"` +} + +type metadataCreatePlacementGroupOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupOutput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingInput struct { + // Unique, case-sensitive identifier you provide to ensure idempotency of your + // listings. This helps avoid duplicate listings. For more information, see + // Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The number of instances that are a part of a Reserved Instance account to + // be listed in the Reserved Instance Marketplace. This number should be less + // than or equal to the instance count associated with the Reserved Instance + // ID specified in this call. + InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` + + // A list specifying the price of the Reserved Instance for each month remaining + // in the Reserved Instance term. + PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` + + // The ID of the active Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` + + metadataCreateReservedInstancesListingInput `json:"-" xml:"-"` +} + +type metadataCreateReservedInstancesListingInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingOutput struct { + // Information about the Reserved Instances listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` + + metadataCreateReservedInstancesListingOutput `json:"-" xml:"-"` +} + +type metadataCreateReservedInstancesListingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingOutput) GoString() string { + return s.String() +} + +type CreateRouteInput struct { + // The CIDR address block used for the destination match. Routing decisions + // are based on the most specific match. 
+ DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway attached to your + // VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. The operation fails if you specify + // an instance ID unless exactly one network interface is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table for the route. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` + + metadataCreateRouteInput `json:"-" xml:"-"` +} + +type metadataCreateRouteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteInput) GoString() string { + return s.String() +} + +type CreateRouteOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataCreateRouteOutput `json:"-" xml:"-"` +} + +type metadataCreateRouteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteOutput) GoString() string { + return s.String() +} + +type CreateRouteTableInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataCreateRouteTableInput `json:"-" xml:"-"` +} + +type metadataCreateRouteTableInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableInput) GoString() string { + return s.String() +} + +type CreateRouteTableOutput struct { + // Information about the route table. 
+ RouteTable *RouteTable `locationName:"routeTable" type:"structure"` + + metadataCreateRouteTableOutput `json:"-" xml:"-"` +} + +type metadataCreateRouteTableOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableOutput) GoString() string { + return s.String() +} + +type CreateSecurityGroupInput struct { + // A description for the security group. This is informational only. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + Description *string `locationName:"GroupDescription" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the security group. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + GroupName *string `type:"string" required:"true"` + + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. + VpcId *string `type:"string"` + + metadataCreateSecurityGroupInput `json:"-" xml:"-"` +} + +type metadataCreateSecurityGroupInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateSecurityGroupOutput struct { + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + metadataCreateSecurityGroupOutput `json:"-" xml:"-"` +} + +type metadataCreateSecurityGroupOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateSnapshotInput struct { + // A description for the snapshot. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `type:"string" required:"true"` + + metadataCreateSnapshotInput `json:"-" xml:"-"` +} + +type metadataCreateSnapshotInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSpotDatafeedSubscription. 
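+//
+// As an illustrative sketch (the bucket name and prefix are placeholders),
+// a subscription that writes the Spot instance data feed to an S3 bucket
+// could look like:
+//
+//	bucket := "my-spot-datafeed-bucket"
+//	prefix := "spot/"
+//	input := CreateSpotDatafeedSubscriptionInput{
+//		Bucket: &bucket,
+//		Prefix: &prefix,
+//	}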
+type CreateSpotDatafeedSubscriptionInput struct { + // The Amazon S3 bucket in which to store the Spot instance data feed. + Bucket *string `locationName:"bucket" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A prefix for the data feed file names. + Prefix *string `locationName:"prefix" type:"string"` + + metadataCreateSpotDatafeedSubscriptionInput `json:"-" xml:"-"` +} + +type metadataCreateSpotDatafeedSubscriptionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionOutput struct { + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` + + metadataCreateSpotDatafeedSubscriptionOutput `json:"-" xml:"-"` +} + +type metadataCreateSpotDatafeedSubscriptionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateSubnetInput struct { + // The Availability Zone for the subnet. + // + // Default: AWS selects one for you. If you create more than one subnet in + // your VPC, we may not necessarily select a different zone for each subnet. + AvailabilityZone *string `type:"string"` + + // The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + metadataCreateSubnetInput `json:"-" xml:"-"` +} + +type metadataCreateSubnetInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetInput) GoString() string { + return s.String() +} + +type CreateSubnetOutput struct { + // Information about the subnet. 
+ Subnet *Subnet `locationName:"subnet" type:"structure"` + + metadataCreateSubnetOutput `json:"-" xml:"-"` +} + +type metadataCreateSubnetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetOutput) GoString() string { + return s.String() +} + +type CreateTagsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more resources to tag. For example, ami-1a2b3c4d. + Resources []*string `locationName:"ResourceId" type:"list" required:"true"` + + // One or more tags. The value parameter is required, but if you don't want + // the tag to have a value, specify the parameter with no value, and we set + // the value to an empty string. + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"` + + metadataCreateTagsInput `json:"-" xml:"-"` +} + +type metadataCreateTagsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + metadataCreateTagsOutput `json:"-" xml:"-"` +} + +type metadataCreateTagsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type CreateVolumeInput struct { + // The Availability Zone in which to create the volume. Use DescribeAvailabilityZones + // to list the Availability Zones that are currently available to you. + AvailabilityZone *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. Volumes + // that are created from encrypted snapshots are automatically encrypted. There + // is no way to create an encrypted volume from an unencrypted snapshot or vice + // versa. If your AMI uses encrypted volumes, you can only launch it on supported + // instance types. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Only valid for Provisioned IOPS (SSD) volumes. The number of I/O operations + // per second (IOPS) to provision for the volume, with a maximum ratio of 30 + // IOPS/GiB. 
+ // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes + Iops *int64 `type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, the + // key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `type:"string"` + + // The size of the volume, in GiBs. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + Size *int64 `type:"integer"` + + // The snapshot from which to create the volume. + SnapshotId *string `type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `type:"string" enum:"VolumeType"` + + metadataCreateVolumeInput `json:"-" xml:"-"` +} + +type metadataCreateVolumeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumeInput) GoString() string { + return s.String() +} + +// Describes the user or group to be added or removed from the permissions for +// a volume. +type CreateVolumePermission struct { + // The specific group that is to be added or removed from a volume's list of + // create volume permissions. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The specific AWS account ID that is to be added or removed from a volume's + // list of create volume permissions. + UserId *string `locationName:"userId" type:"string"` + + metadataCreateVolumePermission `json:"-" xml:"-"` +} + +type metadataCreateVolumePermission struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVolumePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermission) GoString() string { + return s.String() +} + +// Describes modifications to the permissions for a volume. +type CreateVolumePermissionModifications struct { + // Adds a specific AWS account ID or group to a volume's list of create volume + // permissions. + Add []*CreateVolumePermission `locationNameList:"item" type:"list"` + + // Removes a specific AWS account ID or group from a volume's list of create + // volume permissions. 
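+	//
+	// As an illustrative sketch (the account IDs are placeholders), granting
+	// create-volume permission to one account while revoking it from another
+	// could look like:
+	//
+	//	grant := "111122223333"
+	//	revoke := "444455556666"
+	//	mods := CreateVolumePermissionModifications{
+	//		Add:    []*CreateVolumePermission{{UserId: &grant}},
+	//		Remove: []*CreateVolumePermission{{UserId: &revoke}},
+	//	}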
+ Remove []*CreateVolumePermission `locationNameList:"item" type:"list"` + + metadataCreateVolumePermissionModifications `json:"-" xml:"-"` +} + +type metadataCreateVolumePermissionModifications struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVolumePermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermissionModifications) GoString() string { + return s.String() +} + +type CreateVpcEndpointInput struct { + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy to attach to the endpoint that controls access to the service. The + // policy must be in valid JSON format. If this parameter is not specified, + // we attach a default policy that allows full access to the service. + PolicyDocument *string `type:"string"` + + // One or more route table IDs. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` + + // The AWS service name, in the form com.amazonaws.region.service. To get a + // list of available services, use the DescribeVpcEndpointServices request. + ServiceName *string `type:"string" required:"true"` + + // The ID of the VPC in which the endpoint will be used. + VpcId *string `type:"string" required:"true"` + + metadataCreateVpcEndpointInput `json:"-" xml:"-"` +} + +type metadataCreateVpcEndpointInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointInput) GoString() string { + return s.String() +} + +type CreateVpcEndpointOutput struct { + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the endpoint. + VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"` + + metadataCreateVpcEndpointOutput `json:"-" xml:"-"` +} + +type metadataCreateVpcEndpointOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointOutput) GoString() string { + return s.String() +} + +type CreateVpcInput struct { + // The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The supported tenancy options for instances launched into the VPC. A value + // of default means that instances can be launched with any tenancy; a value + // of dedicated means all instances launched into the VPC are launched as dedicated + // tenancy instances regardless of the tenancy assigned to the instance at launch. + // Dedicated tenancy instances run on single-tenant hardware. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + metadataCreateVpcInput `json:"-" xml:"-"` +} + +type metadataCreateVpcInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcInput) GoString() string { + return s.String() +} + +type CreateVpcOutput struct { + // Information about the VPC. + Vpc *Vpc `locationName:"vpc" type:"structure"` + + metadataCreateVpcOutput `json:"-" xml:"-"` +} + +type metadataCreateVpcOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcOutput) GoString() string { + return s.String() +} + +type CreateVpcPeeringConnectionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The AWS account ID of the owner of the peer VPC. + // + // Default: Your AWS account ID + PeerOwnerId *string `locationName:"peerOwnerId" type:"string"` + + // The ID of the VPC with which you are creating the VPC peering connection. + PeerVpcId *string `locationName:"peerVpcId" type:"string"` + + // The ID of the requester VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataCreateVpcPeeringConnectionInput `json:"-" xml:"-"` +} + +type metadataCreateVpcPeeringConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type CreateVpcPeeringConnectionOutput struct { + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` + + metadataCreateVpcPeeringConnectionOutput `json:"-" xml:"-"` +} + +type metadataCreateVpcPeeringConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type CreateVpnConnectionInput struct { + // The ID of the customer gateway. 
+ CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the VPN connection requires static routes. If you are creating + // a VPN connection for a device that does not support BGP, you must specify + // true. + // + // Default: false + Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` + + // The type of VPN connection (ipsec.1). + Type *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` + + metadataCreateVpnConnectionInput `json:"-" xml:"-"` +} + +type metadataCreateVpnConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionInput) GoString() string { + return s.String() +} + +type CreateVpnConnectionOutput struct { + // Information about the VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` + + metadataCreateVpnConnectionOutput `json:"-" xml:"-"` +} + +type metadataCreateVpnConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionOutput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteInput struct { + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` + + metadataCreateVpnConnectionRouteInput `json:"-" xml:"-"` +} + +type metadataCreateVpnConnectionRouteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteOutput struct { + metadataCreateVpnConnectionRouteOutput `json:"-" xml:"-"` +} + +type metadataCreateVpnConnectionRouteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type CreateVpnGatewayInput struct { + // The Availability Zone for the virtual private gateway. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of VPN connection this virtual private gateway supports. + Type *string `type:"string" required:"true" enum:"GatewayType"` + + metadataCreateVpnGatewayInput `json:"-" xml:"-"` +} + +type metadataCreateVpnGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayInput) GoString() string { + return s.String() +} + +type CreateVpnGatewayOutput struct { + // Information about the virtual private gateway. + VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"` + + metadataCreateVpnGatewayOutput `json:"-" xml:"-"` +} + +type metadataCreateVpnGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a customer gateway. +type CustomerGateway struct { + // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number + // (ASN). + BgpAsn *string `locationName:"bgpAsn" type:"string"` + + // The ID of the customer gateway. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The Internet-routable IP address of the customer gateway's outside interface. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The current state of the customer gateway (pending | available | deleting + // | deleted). + State *string `locationName:"state" type:"string"` + + // Any tags assigned to the customer gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the customer gateway supports (ipsec.1). + Type *string `locationName:"type" type:"string"` + + metadataCustomerGateway `json:"-" xml:"-"` +} + +type metadataCustomerGateway struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s CustomerGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerGateway) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayInput struct { + // The ID of the customer gateway. + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDeleteCustomerGatewayInput `json:"-" xml:"-"` +} + +type metadataDeleteCustomerGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayInput) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayOutput struct { + metadataDeleteCustomerGatewayOutput `json:"-" xml:"-"` +} + +type metadataDeleteCustomerGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayOutput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsInput struct { + // The ID of the DHCP options set. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDeleteDhcpOptionsInput `json:"-" xml:"-"` +} + +type metadataDeleteDhcpOptionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsInput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsOutput struct { + metadataDeleteDhcpOptionsOutput `json:"-" xml:"-"` +} + +type metadataDeleteDhcpOptionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DeleteFlowLogsInput struct { + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` + + metadataDeleteFlowLogsInput `json:"-" xml:"-"` +} + +type metadataDeleteFlowLogsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsInput) GoString() string { + return s.String() +} + +type DeleteFlowLogsOutput struct { + // Information about the flow logs that could not be deleted successfully. 
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` + + metadataDeleteFlowLogsOutput `json:"-" xml:"-"` +} + +type metadataDeleteFlowLogsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsOutput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + metadataDeleteInternetGatewayInput `json:"-" xml:"-"` +} + +type metadataDeleteInternetGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayInput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayOutput struct { + metadataDeleteInternetGatewayOutput `json:"-" xml:"-"` +} + +type metadataDeleteInternetGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayOutput) GoString() string { + return s.String() +} + +type DeleteKeyPairInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the key pair. + KeyName *string `type:"string" required:"true"` + + metadataDeleteKeyPairInput `json:"-" xml:"-"` +} + +type metadataDeleteKeyPairInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairInput) GoString() string { + return s.String() +} + +type DeleteKeyPairOutput struct { + metadataDeleteKeyPairOutput `json:"-" xml:"-"` +} + +type metadataDeleteKeyPairOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the rule is an egress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // The rule number of the entry to delete. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` + + metadataDeleteNetworkAclEntryInput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkAclEntryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryOutput struct { + metadataDeleteNetworkAclEntryOutput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkAclEntryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + metadataDeleteNetworkAclInput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkAclInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclOutput struct { + metadataDeleteNetworkAclOutput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkAclOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclOutput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + metadataDeleteNetworkInterfaceInput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkInterfaceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceOutput struct { + metadataDeleteNetworkInterfaceOutput `json:"-" xml:"-"` +} + +type metadataDeleteNetworkInterfaceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DeletePlacementGroupInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string" required:"true"` + + metadataDeletePlacementGroupInput `json:"-" xml:"-"` +} + +type metadataDeletePlacementGroupInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupInput) GoString() string { + return s.String() +} + +type DeletePlacementGroupOutput struct { + metadataDeletePlacementGroupOutput `json:"-" xml:"-"` +} + +type metadataDeletePlacementGroupOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupOutput) GoString() string { + return s.String() +} + +type DeleteRouteInput struct { + // The CIDR range for the route. The value you specify must match the CIDR for + // the route exactly. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + metadataDeleteRouteInput `json:"-" xml:"-"` +} + +type metadataDeleteRouteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteInput) GoString() string { + return s.String() +} + +type DeleteRouteOutput struct { + metadataDeleteRouteOutput `json:"-" xml:"-"` +} + +type metadataDeleteRouteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteOutput) GoString() string { + return s.String() +} + +type DeleteRouteTableInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + metadataDeleteRouteTableInput `json:"-" xml:"-"` +} + +type metadataDeleteRouteTableInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableInput) GoString() string { + return s.String() +} + +type DeleteRouteTableOutput struct { + metadataDeleteRouteTableOutput `json:"-" xml:"-"` +} + +type metadataDeleteRouteTableOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableOutput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You can specify + // either the security group name or the security group ID. 
+ GroupName *string `type:"string"` + + metadataDeleteSecurityGroupInput `json:"-" xml:"-"` +} + +type metadataDeleteSecurityGroupInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupOutput struct { + metadataDeleteSecurityGroupOutput `json:"-" xml:"-"` +} + +type metadataDeleteSecurityGroupOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` + + metadataDeleteSnapshotInput `json:"-" xml:"-"` +} + +type metadataDeleteSnapshotInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +type DeleteSnapshotOutput struct { + metadataDeleteSnapshotOutput `json:"-" xml:"-"` +} + +type metadataDeleteSnapshotOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSpotDatafeedSubscription. +type DeleteSpotDatafeedSubscriptionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDeleteSpotDatafeedSubscriptionInput `json:"-" xml:"-"` +} + +type metadataDeleteSpotDatafeedSubscriptionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteSpotDatafeedSubscriptionOutput struct { + metadataDeleteSpotDatafeedSubscriptionOutput `json:"-" xml:"-"` +} + +type metadataDeleteSpotDatafeedSubscriptionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteSubnetInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the subnet. + SubnetId *string `type:"string" required:"true"` + + metadataDeleteSubnetInput `json:"-" xml:"-"` +} + +type metadataDeleteSubnetInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetInput) GoString() string { + return s.String() +} + +type DeleteSubnetOutput struct { + metadataDeleteSubnetOutput `json:"-" xml:"-"` +} + +type metadataDeleteSubnetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the resource. For example, ami-1a2b3c4d. You can specify more than + // one resource ID. + Resources []*string `locationName:"resourceId" type:"list" required:"true"` + + // One or more tags to delete. If you omit the value parameter, we delete the + // tag regardless of its value. If you specify this parameter with an empty + // string as the value, we delete the key only if its value is an empty string. 
+ Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` + + metadataDeleteTagsInput `json:"-" xml:"-"` +} + +type metadataDeleteTagsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + metadataDeleteTagsOutput `json:"-" xml:"-"` +} + +type metadataDeleteTagsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DeleteVolumeInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` + + metadataDeleteVolumeInput `json:"-" xml:"-"` +} + +type metadataDeleteVolumeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +type DeleteVolumeOutput struct { + metadataDeleteVolumeOutput `json:"-" xml:"-"` +} + +type metadataDeleteVolumeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` + + metadataDeleteVpcEndpointsInput `json:"-" xml:"-"` +} + +type metadataDeleteVpcEndpointsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsInput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsOutput struct { + // Information about the endpoints that were not successfully deleted. 
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` + + metadataDeleteVpcEndpointsOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpcEndpointsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DeleteVpcInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + metadataDeleteVpcInput `json:"-" xml:"-"` +} + +type metadataDeleteVpcInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcInput) GoString() string { + return s.String() +} + +type DeleteVpcOutput struct { + metadataDeleteVpcOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpcOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcOutput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` + + metadataDeleteVpcPeeringConnectionInput `json:"-" xml:"-"` +} + +type metadataDeleteVpcPeeringConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataDeleteVpcPeeringConnectionOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpcPeeringConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` + + metadataDeleteVpnConnectionInput `json:"-" xml:"-"` +} + +type metadataDeleteVpnConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionOutput struct { + metadataDeleteVpnConnectionOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpnConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteInput struct { + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` + + metadataDeleteVpnConnectionRouteInput `json:"-" xml:"-"` +} + +type metadataDeleteVpnConnectionRouteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteOutput struct { + metadataDeleteVpnConnectionRouteOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpnConnectionRouteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the virtual private gateway. 
+ VpnGatewayId *string `type:"string" required:"true"` + + metadataDeleteVpnGatewayInput `json:"-" xml:"-"` +} + +type metadataDeleteVpnGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayInput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayOutput struct { + metadataDeleteVpnGatewayOutput `json:"-" xml:"-"` +} + +type metadataDeleteVpnGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayOutput) GoString() string { + return s.String() +} + +type DeregisterImageInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + metadataDeregisterImageInput `json:"-" xml:"-"` +} + +type metadataDeregisterImageInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageInput) GoString() string { + return s.String() +} + +type DeregisterImageOutput struct { + metadataDeregisterImageOutput `json:"-" xml:"-"` +} + +type metadataDeregisterImageOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + // One or more account attribute names. + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDescribeAccountAttributesInput `json:"-" xml:"-"` +} + +type metadataDescribeAccountAttributesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesOutput struct { + // Information about one or more account attributes. 
+ AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"` + + metadataDescribeAccountAttributesOutput `json:"-" xml:"-"` +} + +type metadataDescribeAccountAttributesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeAddressesInput struct { + // [EC2-VPC] One or more allocation IDs. + // + // Default: Describes all your Elastic IP addresses. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // allocation-id - [EC2-VPC] The allocation ID for the address. + // + // association-id - [EC2-VPC] The association ID for the address. + // + // domain - Indicates whether the address is for use in EC2-Classic (standard) + // or in a VPC (vpc). + // + // instance-id - The ID of the instance the address is associated with, if + // any. + // + // network-interface-id - [EC2-VPC] The ID of the network interface that + // the address is associated with, if any. + // + // network-interface-owner-id - The AWS account ID of the owner. + // + // private-ip-address - [EC2-VPC] The private IP address associated with + // the Elastic IP address. + // + // public-ip - The Elastic IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // [EC2-Classic] One or more Elastic IP addresses. + // + // Default: Describes all your Elastic IP addresses. + PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"` + + metadataDescribeAddressesInput `json:"-" xml:"-"` +} + +type metadataDescribeAddressesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesInput) GoString() string { + return s.String() +} + +type DescribeAddressesOutput struct { + // Information about one or more Elastic IP addresses. + Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"` + + metadataDescribeAddressesOutput `json:"-" xml:"-"` +} + +type metadataDescribeAddressesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesOutput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // message - Information about the Availability Zone. + // + // region-name - The name of the region for the Availability Zone (for example, + // us-east-1). + // + // state - The state of the Availability Zone (available | information | + // impaired | unavailable). + // + // zone-name - The name of the Availability Zone (for example, us-east-1a). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more Availability Zones. + ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` + + metadataDescribeAvailabilityZonesInput `json:"-" xml:"-"` +} + +type metadataDescribeAvailabilityZonesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesInput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesOutput struct { + // Information about one or more Availability Zones. + AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` + + metadataDescribeAvailabilityZonesOutput `json:"-" xml:"-"` +} + +type metadataDescribeAvailabilityZonesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesOutput) GoString() string { + return s.String() +} + +type DescribeBundleTasksInput struct { + // One or more bundle task IDs. + // + // Default: Describes all your bundle tasks. + BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bundle-id - The ID of the bundle task. + // + // error-code - If the task failed, the error code returned. + // + // error-message - If the task failed, the error message returned. + // + // instance-id - The ID of the instance. + // + // progress - The level of task completion, as a percentage (for example, + // 20%). + // + // s3-bucket - The Amazon S3 bucket to store the AMI. + // + // s3-prefix - The beginning of the AMI name. + // + // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). + // + // state - The state of the task (pending | waiting-for-shutdown | bundling + // | storing | cancelling | complete | failed). + // + // update-time - The time of the most recent update for the task. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + metadataDescribeBundleTasksInput `json:"-" xml:"-"` +} + +type metadataDescribeBundleTasksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeBundleTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksInput) GoString() string { + return s.String() +} + +type DescribeBundleTasksOutput struct { + // Information about one or more bundle tasks. + BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"` + + metadataDescribeBundleTasksOutput `json:"-" xml:"-"` +} + +type metadataDescribeBundleTasksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeBundleTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksOutput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-id - The ID of a VPC security group that's associated with the instance. + // + // instance-id - The ID of the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC that the instance is linked to. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeClassicLinkInstancesInput `json:"-" xml:"-"` +} + +type metadataDescribeClassicLinkInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesInput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesOutput struct { + // Information about one or more linked EC2-Classic instances. + Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeClassicLinkInstancesOutput `json:"-" xml:"-"` +} + +type metadataDescribeClassicLinkInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesOutput) GoString() string { + return s.String() +} + +type DescribeConversionTasksInput struct { + // One or more conversion task IDs. + ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + metadataDescribeConversionTasksInput `json:"-" xml:"-"` +} + +type metadataDescribeConversionTasksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeConversionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksInput) GoString() string { + return s.String() +} + +type DescribeConversionTasksOutput struct { + // Information about the conversion tasks. + ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"` + + metadataDescribeConversionTasksOutput `json:"-" xml:"-"` +} + +type metadataDescribeConversionTasksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeConversionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksOutput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysInput struct { + // One or more customer gateway IDs. + // + // Default: Describes all your customer gateways. + CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // System Number (ASN). + // + // customer-gateway-id - The ID of the customer gateway. + // + // ip-address - The IP address of the customer gateway's Internet-routable + // external interface. + // + // state - The state of the customer gateway (pending | available | deleting + // | deleted). + // + // type - The type of customer gateway. Currently, the only supported type + // is ipsec.1. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + metadataDescribeCustomerGatewaysInput `json:"-" xml:"-"` +} + +type metadataDescribeCustomerGatewaysInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysInput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysOutput struct { + // Information about one or more customer gateways. + CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"` + + metadataDescribeCustomerGatewaysOutput `json:"-" xml:"-"` +} + +type metadataDescribeCustomerGatewaysOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsInput struct { + // The IDs of one or more DHCP options sets. + // + // Default: Describes all your DHCP options sets. + DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // key - The key for one of the options (for example, domain-name). + // + // value - The value for one of the options. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + metadataDescribeDhcpOptionsInput `json:"-" xml:"-"` +} + +type metadataDescribeDhcpOptionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsInput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsOutput struct { + // Information about one or more DHCP options sets. + DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` + + metadataDescribeDhcpOptionsOutput `json:"-" xml:"-"` +} + +type metadataDescribeDhcpOptionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + // One or more export task IDs. + ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"` + + metadataDescribeExportTasksInput `json:"-" xml:"-"` +} + +type metadataDescribeExportTasksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +type DescribeExportTasksOutput struct { + // Information about the export tasks. + ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"` + + metadataDescribeExportTasksOutput `json:"-" xml:"-"` +} + +type metadataDescribeExportTasksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +type DescribeFlowLogsInput struct { + // One or more filters. + // + // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). + // + // flow-log-id - The ID of the flow log. + // + // log-group-name - The name of the log group. + // + // resource-id - The ID of the VPC, subnet, or network interface. + // + // traffic-type - The type of traffic (ACCEPT | REJECT | ALL) + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // NextToken value. 
This value can be between 5 and 1000; if MaxResults is given + // a value larger than 1000, only 1000 results are returned. You cannot specify + // this parameter and the flow log IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + + metadataDescribeFlowLogsInput `json:"-" xml:"-"` +} + +type metadataDescribeFlowLogsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsInput) GoString() string { + return s.String() +} + +type DescribeFlowLogsOutput struct { + // Information about the flow logs. + FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeFlowLogsOutput `json:"-" xml:"-"` +} + +type metadataDescribeFlowLogsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsOutput) GoString() string { + return s.String() +} + +type DescribeImageAttributeInput struct { + // The AMI attribute. + // + // Note: Depending on your account privileges, the blockDeviceMapping attribute + // may return a Client.AuthFailure error. If this happens, use DescribeImages + // to get information about the block device mapping for the AMI. + Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + metadataDescribeImageAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeImageAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeInput) GoString() string { + return s.String() +} + +// Describes an image attribute. +type DescribeImageAttributeOutput struct { + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // A description for the AMI. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // One or more launch permissions. + LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. 
+ RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The value to use for a resource attribute. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + metadataDescribeImageAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeImageAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeOutput) GoString() string { + return s.String() +} + +type DescribeImagesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Scopes the images by users with explicit launch permissions. Specify an AWS + // account ID, self (the sender of the request), or all (public AMIs). + ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"` + + // One or more filters. + // + // architecture - The image architecture (i386 | x86_64). + // + // block-device-mapping.delete-on-termination - A Boolean value that indicates + // whether the Amazon EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh). + // + // block-device-mapping.snapshot-id - The ID of the snapshot used for the + // EBS volume. + // + // block-device-mapping.volume-size - The volume size of the EBS volume, + // in GiB. + // + // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 + // | standard | io1). + // + // description - The description of the image (provided during image creation). + // + // hypervisor - The hypervisor type (ovm | xen). + // + // image-id - The ID of the image. + // + // image-type - The image type (machine | kernel | ramdisk). + // + // is-public - A Boolean that indicates whether the image is public. + // + // kernel-id - The kernel ID. + // + // manifest-location - The location of the image manifest. + // + // name - The name of the AMI (provided during image creation). + // + // owner-alias - The AWS account alias (for example, amazon). + // + // owner-id - The AWS account ID of the image owner. + // + // platform - The platform. To only list Windows-based AMIs, use windows. + // + // product-code - The product code. + // + // product-code.type - The type of the product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // root-device-name - The name of the root device volume (for example, /dev/sda1). + // + // root-device-type - The type of the root device volume (ebs | instance-store). + // + // state - The state of the image (available | pending | failed). + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - The message for the state change. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // virtualization-type - The virtualization type (paravirtual | hvm). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more image IDs. + // + // Default: Describes all images available to you. + ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` + + // Filters the images by the owner. Specify an AWS account ID, amazon (owner + // is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the + // sender of the request). Omitting this option returns all images for which + // you have launch permissions, regardless of ownership. + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` + + metadataDescribeImagesInput `json:"-" xml:"-"` +} + +type metadataDescribeImagesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +type DescribeImagesOutput struct { + // Information about one or more images. + Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"` + + metadataDescribeImagesOutput `json:"-" xml:"-"` +} + +type metadataDescribeImagesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import image task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` + + metadataDescribeImportImageTasksInput `json:"-" xml:"-"` +} + +type metadataDescribeImportImageTasksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksInput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksOutput struct { + // A list of zero or more import image tasks that are currently active or were + // completed or canceled in the previous 7 days. 
+ ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeImportImageTasksOutput `json:"-" xml:"-"` +} + +type metadataDescribeImportImageTasksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksOutput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import snapshot task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` + + metadataDescribeImportSnapshotTasksInput `json:"-" xml:"-"` +} + +type metadataDescribeImportSnapshotTasksInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksInput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksOutput struct { + // A list of zero or more import snapshot tasks that are currently active or + // were completed or canceled in the previous 7 days. + ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeImportSnapshotTasksOutput `json:"-" xml:"-"` +} + +type metadataDescribeImportSnapshotTasksOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksOutput) GoString() string { + return s.String() +} + +type DescribeInstanceAttributeInput struct { + // The instance attribute. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + metadataDescribeInstanceAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeInstanceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeInput) GoString() string { + return s.String() +} + +// Describes an instance attribute. +type DescribeInstanceAttributeOutput struct { + // The block device mapping of the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance through the Amazon + // EC2 console, CLI, or API; otherwise, you can. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Indicates whether the instance is optimized for EBS I/O. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // The security groups associated with the instance. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // The instance type. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + // The value to use for a resource attribute. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // The Base64-encoded MIME user data. + UserData *AttributeValue `locationName:"userData" type:"structure"` + + metadataDescribeInstanceAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeInstanceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeOutput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone of the instance. + // + // event.code - The code for the scheduled event (instance-reboot | system-reboot + // | system-maintenance | instance-retirement | instance-stop). + // + // event.description - A description of the event. + // + // event.not-after - The latest end time for the scheduled event (for example, + // 2014-09-15T17:15:20.000Z). + // + // event.not-before - The earliest start time for the scheduled event (for + // example, 2014-09-15T17:15:20.000Z). + // + // instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-status.reachability - Filters on instance status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // instance-status.status - The status of the instance (ok | impaired | initializing + // | insufficient-data | not-applicable). + // + // system-status.reachability - Filters on system status where the name is + // reachability (passed | failed | initializing | insufficient-data). + // + // system-status.status - The system status of the instance (ok | impaired + // | initializing | insufficient-data | not-applicable). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // When true, includes the health status for all instances. When false, includes + // the health status for running instances only. + // + // Default: false + IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 100 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + + metadataDescribeInstanceStatusInput `json:"-" xml:"-"` +} + +type metadataDescribeInstanceStatusInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusInput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusOutput struct { + // One or more instance status descriptions. + InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeInstanceStatusOutput `json:"-" xml:"-"` +} + +type metadataDescribeInstanceStatusOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // architecture - The instance architecture (i386 | x86_64). + // + // availability-zone - The Availability Zone of the instance. + // + // block-device-mapping.attach-time - The attach time for an EBS volume mapped + // to the instance, for example, 2010-09-15T17:15:20.000Z. + // + // block-device-mapping.delete-on-termination - A Boolean that indicates + // whether the EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh or xvdh). + // + // block-device-mapping.status - The status for the EBS volume (attaching + // | attached | detaching | detached). + // + // block-device-mapping.volume-id - The volume ID of the EBS volume. + // + // client-token - The idempotency token you provided when you launched the + // instance. + // + // dns-name - The public DNS name of the instance. + // + // group-id - The ID of the security group for the instance. EC2-Classic + // only. + // + // group-name - The name of the security group for the instance. EC2-Classic + // only. + // + // hypervisor - The hypervisor type of the instance (ovm | xen). + // + // iam-instance-profile.arn - The instance profile associated with the instance. + // Specified as an ARN. + // + // image-id - The ID of the image used to launch the instance. + // + // instance-id - The ID of the instance. + // + // instance-lifecycle - Indicates whether this is a Spot Instance (spot). + // + // instance-state-code - The state of the instance, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are: + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-type - The type of instance (for example, t2.micro). + // + // instance.group-id - The ID of the security group for the instance. + // + // instance.group-name - The name of the security group for the instance. + // + // ip-address - The public IP address of the instance. + // + // kernel-id - The kernel ID. + // + // key-name - The name of the key pair used when the instance was launched. + // + // launch-index - When launching multiple instances, this is the index for + // the instance in the launch group (for example, 0, 1, 2, and so on). + // + // launch-time - The time when the instance was launched. 
+ // + // monitoring-state - Indicates whether monitoring is enabled for the instance + // (disabled | enabled). + // + // owner-id - The AWS account ID of the instance owner. + // + // placement-group-name - The name of the placement group for the instance. + // + // platform - The platform. Use windows if you have Windows instances; otherwise, + // leave blank. + // + // private-dns-name - The private DNS name of the instance. + // + // private-ip-address - The private IP address of the instance. + // + // product-code - The product code associated with the AMI used to launch + // the instance. + // + // product-code.type - The type of product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // reason - The reason for the current state of the instance (for example, + // shows "User Initiated [date]" when you stop or terminate the instance). Similar + // to the state-reason-code filter. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // reservation-id - The ID of the instance's reservation. A reservation ID + // is created any time you launch an instance. A reservation ID has a one-to-one + // relationship with an instance launch request, but can be associated with + // more than one instance if you launch multiple instances using the same launch + // request. For example, if you launch one instance, you'll get one reservation + // ID. If you launch ten instances using the same launch request, you'll also + // get one reservation ID. + // + // root-device-name - The name of the root device for the instance (for example, + // /dev/sda1 or /dev/xvda). + // + // root-device-type - The type of root device that the instance uses (ebs + // | instance-store). + // + // source-dest-check - Indicates whether the instance performs source/destination + // checking. A value of true means that checking is enabled, and false means + // checking is disabled. The value must be false for the instance to perform + // network address translation (NAT) in your VPC. + // + // spot-instance-request-id - The ID of the Spot Instance request. + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - A message that describes the state change. + // + // subnet-id - The ID of the subnet for the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource, + // where tag:key is the tag's key. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // tenancy - The tenancy of an instance (dedicated | default). + // + // virtualization-type - The virtualization type of the instance (paravirtual + // | hvm). + // + // vpc-id - The ID of the VPC that the instance is running in. + // + // network-interface.description - The description of the network interface. + // + // network-interface.subnet-id - The ID of the subnet for the network interface. 
+ // + // network-interface.vpc-id - The ID of the VPC for the network interface. + // + // network-interface.network-interface-id - The ID of the network interface. + // + // network-interface.owner-id - The ID of the owner of the network interface. + // + // network-interface.availability-zone - The Availability Zone for the network + // interface. + // + // network-interface.requester-id - The requester ID for the network interface. + // + // network-interface.requester-managed - Indicates whether the network interface + // is being managed by AWS. + // + // network-interface.status - The status of the network interface (available + // | in-use). + // + // network-interface.mac-address - The MAC address of the network interface. + // + // network-interface.private-dns-name - The private DNS name of the network + // interface. + // + // network-interface.source-dest-check - Whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. + // + // network-interface.group-id - The ID of a security group associated with + // the network interface. + // + // network-interface.group-name - The name of a security group associated + // with the network interface. + // + // network-interface.attachment.attachment-id - The ID of the interface attachment. + // + // network-interface.attachment.instance-id - The ID of the instance to which + // the network interface is attached. + // + // network-interface.attachment.instance-owner-id - The owner ID of the instance + // to which the network interface is attached. + // + // network-interface.addresses.private-ip-address - The private IP address + // associated with the network interface. + // + // network-interface.attachment.device-index - The device index to which + // the network interface is attached. + // + // network-interface.attachment.status - The status of the attachment (attaching + // | attached | detaching | detached). + // + // network-interface.attachment.attach-time - The time that the network interface + // was attached to an instance. + // + // network-interface.attachment.delete-on-termination - Specifies whether + // the attachment is deleted when an instance is terminated. + // + // network-interface.addresses.primary - Specifies whether the IP address + // of the network interface is the primary private IP address. + // + // network-interface.addresses.association.public-ip - The ID of the association + // of an Elastic IP address with a network interface. + // + // network-interface.addresses.association.ip-owner-id - The owner ID of + // the private IP address associated with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. + // + // Default: Describes all your instances.
+ InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeInstancesInput `json:"-" xml:"-"` +} + +type metadataDescribeInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +type DescribeInstancesOutput struct { + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more reservations. + Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"` + + metadataDescribeInstancesOutput `json:"-" xml:"-"` +} + +type metadataDescribeInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (available). Present only if a VPC is attached. + // + // attachment.vpc-id - The ID of an attached VPC. + // + // internet-gateway-id - The ID of the Internet gateway. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Internet gateway IDs. + // + // Default: Describes all your Internet gateways. 
+ InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` + + metadataDescribeInternetGatewaysInput `json:"-" xml:"-"` +} + +type metadataDescribeInternetGatewaysInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysInput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysOutput struct { + // Information about one or more Internet gateways. + InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` + + metadataDescribeInternetGatewaysOutput `json:"-" xml:"-"` +} + +type metadataDescribeInternetGatewaysOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeKeyPairsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // fingerprint - The fingerprint of the key pair. + // + // key-name - The name of the key pair. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more key pair names. + // + // Default: Describes all your key pairs. + KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"` + + metadataDescribeKeyPairsInput `json:"-" xml:"-"` +} + +type metadataDescribeKeyPairsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeKeyPairsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsInput) GoString() string { + return s.String() +} + +type DescribeKeyPairsOutput struct { + // Information about one or more key pairs. + KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"` + + metadataDescribeKeyPairsOutput `json:"-" xml:"-"` +} + +type metadataDescribeKeyPairsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeKeyPairsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsOutput) GoString() string { + return s.String() +} + +type DescribeMovingAddressesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). 
+ Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value outside of this range, an error is returned. + // + // Default: If no value is provided, the default is 1000. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Elastic IP addresses. + PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"` + + metadataDescribeMovingAddressesInput `json:"-" xml:"-"` +} + +type metadataDescribeMovingAddressesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeMovingAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesInput) GoString() string { + return s.String() +} + +type DescribeMovingAddressesOutput struct { + // The status for each Elastic IP address. + MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeMovingAddressesOutput `json:"-" xml:"-"` +} + +type metadataDescribeMovingAddressesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeMovingAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesOutput) GoString() string { + return s.String() +} + +type DescribeNetworkAclsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.association-id - The ID of an association ID for the ACL. + // + // association.network-acl-id - The ID of the network ACL involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // default - Indicates whether the ACL is the default network ACL for the + // VPC. + // + // entry.cidr - The CIDR range specified in the entry. + // + // entry.egress - Indicates whether the entry applies to egress traffic. + // + // entry.icmp.code - The ICMP code specified in the entry, if any. + // + // entry.icmp.type - The ICMP type specified in the entry, if any. + // + // entry.port-range.from - The start of the port range specified in the entry. + // + // entry.port-range.to - The end of the port range specified in the entry. + // + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // or a protocol number). + // + // entry.rule-action - Allows or denies the matching traffic (allow | deny). 
+ // + // entry.rule-number - The number of an entry (in other words, rule) in the + // ACL's set of entries. + // + // network-acl-id - The ID of the network ACL. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network ACL. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more network ACL IDs. + // + // Default: Describes all your network ACLs. + NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` + + metadataDescribeNetworkAclsInput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkAclsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkAclsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsInput) GoString() string { + return s.String() +} + +type DescribeNetworkAclsOutput struct { + // Information about one or more network ACLs. + NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` + + metadataDescribeNetworkAclsOutput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkAclsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkAclsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsOutput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfaceAttributeInput struct { + // The attribute of the network interface. + Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + metadataDescribeNetworkInterfaceAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkInterfaceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfaceAttributeOutput struct { + // The attachment (if any) of the network interface. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description of the network interface. 
+ Description *AttributeValue `locationName:"description" type:"structure"` + + // The security groups associated with the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Indicates whether source/destination checking is enabled. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + metadataDescribeNetworkInterfaceAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkInterfaceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfacesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // addresses.private-ip-address - The private IP addresses associated with + // the network interface. + // + // addresses.primary - Whether the private IP address is the primary IP address + // associated with the network interface. + // + // addresses.association.public-ip - The association ID returned when the + // network interface was associated with the Elastic IP address. + // + // addresses.association.owner-id - The owner ID of the addresses associated + // with the network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.public-dns-name - The public DNS name for the network interface. + // + // attachment.attachment-id - The ID of the interface attachment. + // + // attachment.instance-id - The ID of the instance to which the network interface + // is attached. + // + // attachment.instance-owner-id - The owner ID of the instance to which the + // network interface is attached. + // + // attachment.device-index - The device index to which the network interface + // is attached. + // + // attachment.status - The status of the attachment (attaching | attached + // | detaching | detached). + // + // attachment.attach.time - The time that the network interface was attached + // to an instance. + // + // attachment.delete-on-termination - Indicates whether the attachment is + // deleted when an instance is terminated. + // + // availability-zone - The Availability Zone of the network interface. + // + // description - The description of the network interface. + // + // group-id - The ID of a security group associated with the network interface. + // + // group-name - The name of a security group associated with the network + // interface. 
+ // + // mac-address - The MAC address of the network interface. + // + // network-interface-id - The ID of the network interface. + // + // owner-id - The AWS account ID of the network interface owner. + // + // private-ip-address - The private IP address or addresses of the network + // interface. + // + // private-dns-name - The private DNS name of the network interface. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // requester-managed - Indicates whether the network interface is being managed + // by an AWS service (for example, AWS Management Console, Auto Scaling, and + // so on). + // + // source-dest-check - Indicates whether the network interface performs source/destination + // checking. A value of true means checking is enabled, and false means checking + // is disabled. The value must be false for the network interface to perform + // Network Address Translation (NAT) in your VPC. + // + // status - The status of the network interface. If the network interface + // is not attached to an instance, the status is available; if a network interface + // is attached to an instance, the status is in-use. + // + // subnet-id - The ID of the subnet for the network interface. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network interface. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // One or more network interface IDs. + // + // Default: Describes all your network interfaces. + NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"` + + metadataDescribeNetworkInterfacesInput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkInterfacesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesInput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfacesOutput struct { + // Information about one or more network interfaces.
+ NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + metadataDescribeNetworkInterfacesOutput `json:"-" xml:"-"` +} + +type metadataDescribeNetworkInterfacesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesOutput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-name - The name of the placement group. + // + // state - The state of the placement group (pending | available | deleting + // | deleted). + // + // strategy - The strategy of the placement group (cluster). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more placement group names. + // + // Default: Describes all your placement groups, or only those otherwise specified. + GroupNames []*string `locationName:"groupName" type:"list"` + + metadataDescribePlacementGroupsInput `json:"-" xml:"-"` +} + +type metadataDescribePlacementGroupsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribePlacementGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsInput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsOutput struct { + // One or more placement groups. + PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"` + + metadataDescribePlacementGroupsOutput `json:"-" xml:"-"` +} + +type metadataDescribePlacementGroupsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribePlacementGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsOutput) GoString() string { + return s.String() +} + +type DescribePrefixListsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // prefix-list-id: The ID of a prefix list. + // + // prefix-list-name: The name of a prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more prefix list IDs. 
+ PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` + + metadataDescribePrefixListsInput `json:"-" xml:"-"` +} + +type metadataDescribePrefixListsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribePrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsInput) GoString() string { + return s.String() +} + +type DescribePrefixListsOutput struct { + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // All available prefix lists. + PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` + + metadataDescribePrefixListsOutput `json:"-" xml:"-"` +} + +type metadataDescribePrefixListsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribePrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsOutput) GoString() string { + return s.String() +} + +type DescribeRegionsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). + // + // region-name - The name of the region (for example, us-east-1). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more regions. + RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"` + + metadataDescribeRegionsInput `json:"-" xml:"-"` +} + +type metadataDescribeRegionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeRegionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsInput) GoString() string { + return s.String() +} + +type DescribeRegionsOutput struct { + // Information about one or more regions. + Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"` + + metadataDescribeRegionsOutput `json:"-" xml:"-"` +} + +type metadataDescribeRegionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeRegionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. 
+ // + // duration - The duration of the Reserved Instance (one year or three years), + // in seconds (31536000 | 94608000). + // + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type on which the Reserved Instance can be + // used. + // + // product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)). + // + // reserved-instances-id - The ID of the Reserved Instance. + // + // start - The time at which the Reserved Instance purchase request was placed + // (for example, 2014-08-07T11:54:42.000Z). + // + // state - The state of the Reserved Instance (payment-pending | active | + // payment-failed | retired). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // One or more Reserved Instance IDs. + // + // Default: Describes all your Reserved Instances, or only those otherwise + // specified. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"` + + metadataDescribeReservedInstancesInput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesListingsInput struct { + // One or more filters. + // + // reserved-instances-id - The ID of the Reserved Instances. + // + // reserved-instances-listing-id - The ID of the Reserved Instances listing. 
+ // + // status - The status of the Reserved Instance listing (pending | active + // | cancelled | closed). + // + // status-message - The reason for the status. + Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"` + + // One or more Reserved Instance IDs. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // One or more Reserved Instance Listing IDs. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` + + metadataDescribeReservedInstancesListingsInput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesListingsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesListingsOutput struct { + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` + + metadataDescribeReservedInstancesListingsOutput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesListingsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsInput struct { + // One or more filters. + // + // client-token - The idempotency token for the modification request. + // + // create-date - The time when the modification request was created. + // + // effective-date - The time when the modification becomes effective. + // + // modification-result.reserved-instances-id - The ID for the Reserved Instances + // created as part of the modification request. This ID is only available when + // the status of the modification is fulfilled. + // + // modification-result.target-configuration.availability-zone - The Availability + // Zone for the new Reserved Instances. + // + // modification-result.target-configuration.instance-count - The number + // of new Reserved Instances. + // + // modification-result.target-configuration.instance-type - The instance + // type of the new Reserved Instances. + // + // modification-result.target-configuration.platform - The network platform + // of the new Reserved Instances (EC2-Classic | EC2-VPC). + // + // reserved-instances-id - The ID of the Reserved Instances modified. + // + // reserved-instances-modification-id - The ID of the modification request. + // + // status - The status of the Reserved Instances modification request (processing + // | fulfilled | failed). + // + // status-message - The reason for the status. + // + // update-date - The time when the modification request was last updated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // IDs for the submitted modification request. 
+ ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"` + + metadataDescribeReservedInstancesModificationsInput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesModificationsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsOutput struct { + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance modification information. + ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` + + metadataDescribeReservedInstancesModificationsOutput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesModificationsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsInput struct { + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (for example, one year + // or three years), in seconds (31536000 | 94608000). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type on which the Reserved Instance can be + // used. + // + // marketplace - Set to true to show only Reserved Instance Marketplace offerings. + // When this filter is not used, which is the default behavior, all offerings + // from AWS and Reserved Instance Marketplace are listed. + // + // product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC. 
(Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)) + // + // reserved-instances-offering-id - The Reserved Instances offering ID. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Include Marketplace offerings in the response. + IncludeMarketplace *bool `type:"boolean"` + + // The tenancy of the Reserved Instance offering. A Reserved Instance with dedicated + // tenancy runs on single-tenant hardware and can only be launched within a + // VPC. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. For more information, + // see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum duration (in seconds) to filter when searching for offerings. + // + // Default: 94608000 (3 years) + MaxDuration *int64 `type:"long"` + + // The maximum number of instances to filter when searching for offerings. + // + // Default: 20 + MaxInstanceCount *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. The maximum is 100. + // + // Default: 100 + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The minimum duration (in seconds) to filter when searching for offerings. + // + // Default: 2592000 (1 month) + MinDuration *int64 `type:"long"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. Instances that include + // (Amazon VPC) in the description are for use with Amazon VPC. + ProductDescription *string `type:"string" enum:"RIProductDescription"` + + // One or more Reserved Instances offering IDs. + ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` + + metadataDescribeReservedInstancesOfferingsInput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesOfferingsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsOutput struct { + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of Reserved Instances offerings. + ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` + + metadataDescribeReservedInstancesOfferingsOutput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesOfferingsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOutput struct { + // A list of Reserved Instances. + ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` + + metadataDescribeReservedInstancesOutput `json:"-" xml:"-"` +} + +type metadataDescribeReservedInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOutput) GoString() string { + return s.String() +} + +type DescribeRouteTablesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.route-table-association-id - The ID of an association ID for + // the route table. + // + // association.route-table-id - The ID of the route table involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // association.main - Indicates whether the route table is the main route + // table for the VPC. + // + // route-table-id - The ID of the route table. + // + // route.destination-cidr-block - The CIDR range specified in a route in + // the table. + // + // route.destination-prefix-list-id - The ID (prefix) of the AWS service + // specified in a route in the table. + // + // route.gateway-id - The ID of a gateway specified in a route in the table. + // + // route.instance-id - The ID of an instance specified in a route in the + // table. + // + // route.origin - Describes how the route was created. CreateRouteTable indicates + // that the route was automatically created when the route table was created; + // CreateRoute indicates that the route was manually added to the route table; + // EnableVgwRoutePropagation indicates that the route was propagated by route + // propagation. + // + // route.state - The state of a route in the route table (active | blackhole). + // The blackhole state indicates that the route's target isn't available (for + // example, the specified gateway isn't attached to the VPC, the specified NAT + // instance has been terminated, and so on). + // + // route.vpc-peering-connection-id - The ID of a VPC peering connection specified + // in a route in the table. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. 
+ // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the route table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more route table IDs. + // + // Default: Describes all your route tables. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` + + metadataDescribeRouteTablesInput `json:"-" xml:"-"` +} + +type metadataDescribeRouteTablesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesInput) GoString() string { + return s.String() +} + +type DescribeRouteTablesOutput struct { + // Information about one or more route tables. + RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` + + metadataDescribeRouteTablesOutput `json:"-" xml:"-"` +} + +type metadataDescribeRouteTablesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesOutput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. If using multiple filters for rules, the results include + // security groups for which any combination of rules - not necessarily a single + // rule - match all filters. + // + // description - The description of the security group. + // + // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service + // to which the security group allows access. + // + // group-id - The ID of the security group. + // + // group-name - The name of the security group. + // + // ip-permission.cidr - A CIDR range that has been granted permission. + // + // ip-permission.from-port - The start of port range for the TCP and UDP + // protocols, or an ICMP type number. + // + // ip-permission.group-id - The ID of a security group that has been granted + // permission. + // + // ip-permission.group-name - The name of a security group that has been + // granted permission. + // + // ip-permission.protocol - The IP protocol for the permission (tcp | udp + // | icmp or a protocol number). + // + // ip-permission.to-port - The end of port range for the TCP and UDP protocols, + // or an ICMP code. + // + // ip-permission.user-id - The ID of an AWS account that has been granted + // permission. 
+ // + // owner-id - The AWS account ID of the owner of the security group. + // + // tag-key - The key of a tag assigned to the security group. + // + // tag-value - The value of a tag assigned to the security group. + // + // vpc-id - The ID of the VPC specified when the security group was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more security group IDs. Required for security groups in a nondefault + // VPC. + // + // Default: Describes all your security groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // [EC2-Classic and default VPC only] One or more security group names. You + // can specify either the security group name or the security group ID. For + // security groups in a nondefault VPC, use the group-name filter to describe + // security groups by name. + // + // Default: Describes all your security groups. + GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` + + metadataDescribeSecurityGroupsInput `json:"-" xml:"-"` +} + +type metadataDescribeSecurityGroupsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsInput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsOutput struct { + // Information about one or more security groups. + SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` + + metadataDescribeSecurityGroupsOutput `json:"-" xml:"-"` +} + +type metadataDescribeSecurityGroupsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeInput struct { + // The snapshot attribute you would like to view. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` + + metadataDescribeSnapshotAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeSnapshotAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeInput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeOutput struct { + // A list of permissions for creating volumes from the snapshot. + CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the EBS snapshot. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` + + metadataDescribeSnapshotAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeSnapshotAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type DescribeSnapshotsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // description - A description of the snapshot. + // + // owner-alias - The AWS account alias (for example, amazon) that owns the + // snapshot. + // + // owner-id - The ID of the AWS account that owns the snapshot. + // + // progress - The progress of the snapshot, as a percentage (for example, + // 80%). + // + // snapshot-id - The snapshot ID. + // + // start-time - The time stamp when the snapshot was initiated. + // + // status - The status of the snapshot (pending | completed | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // volume-id - The ID of the volume the snapshot is for. + // + // volume-size - The size of the volume, in GiB. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of snapshot results returned by DescribeSnapshots in paginated + // output. When this parameter is used, DescribeSnapshots only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeSnapshots + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeSnapshots returns + // all results. You cannot specify this parameter and the snapshot IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeSnapshots + // request where MaxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the NextToken value. This value is null when there are no more results + // to return. + NextToken *string `type:"string"` + + // Returns the snapshots owned by the specified owner. Multiple owners can be + // specified. 
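The MaxResults/NextToken fields documented above follow the usual EC2 pagination contract: issue a page-sized request, then resend the same input with the token from the previous response until that token comes back nil. A rough usage sketch only, assuming the upstream github.com/aws/aws-sdk-go import path (the vendored copy in this repository may live under a different prefix), the aws.String/aws.Int64/aws.StringValue helpers from the SDK's aws package, and an *ec2.EC2 client constructed elsewhere and passed in as svc:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// listCompletedSnapshots pages through DescribeSnapshots for snapshots owned
// by the calling account, following NextToken until the final page.
func listCompletedSnapshots(svc *ec2.EC2) error {
	input := &ec2.DescribeSnapshotsInput{
		OwnerIds:   []*string{aws.String("self")},
		MaxResults: aws.Int64(500), // allowed range is 5 to 1000
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name:   aws.String("status"),
				Values: []*string{aws.String("completed")},
			},
		},
	}
	for {
		resp, err := svc.DescribeSnapshots(input)
		if err != nil {
			return err
		}
		for _, s := range resp.Snapshots {
			fmt.Println(aws.StringValue(s.SnapshotId))
		}
		if resp.NextToken == nil { // nil token means no more pages
			return nil
		}
		input.NextToken = resp.NextToken
	}
}
```

Note the constraint stated above: MaxResults cannot be combined with explicit snapshot IDs in the same request, which is why the sketch narrows the result set with owners and filters instead.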
+ OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` + + // One or more AWS accounts IDs that can create volumes from the snapshot. + RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"` + + // One or more snapshot IDs. + // + // Default: Describes snapshots for which you have launch permissions. + SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"` + + metadataDescribeSnapshotsInput `json:"-" xml:"-"` +} + +type metadataDescribeSnapshotsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +type DescribeSnapshotsOutput struct { + // The NextToken value to include in a future DescribeSnapshots request. When + // the results of a DescribeSnapshots request exceed MaxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the snapshots. + Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"` + + metadataDescribeSnapshotsOutput `json:"-" xml:"-"` +} + +type metadataDescribeSnapshotsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDescribeSpotDatafeedSubscriptionInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotDatafeedSubscriptionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionOutput struct { + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` + + metadataDescribeSpotDatafeedSubscriptionOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotDatafeedSubscriptionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetInstances. 
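A minimal call sketch for this operation using the input type defined just below; it assumes the upstream github.com/aws/aws-sdk-go import path, the aws.String/aws.Int64/aws.StringValue helpers, a caller-supplied *ec2.EC2 client, and a placeholder Spot fleet request ID. The ActiveInstance/InstanceId names come from the corresponding types defined elsewhere in this file:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// spotFleetInstanceIDs collects the IDs of the instances currently running in
// one Spot fleet, following NextToken across pages.
func spotFleetInstanceIDs(svc *ec2.EC2, fleetID string) ([]string, error) {
	var ids []string
	input := &ec2.DescribeSpotFleetInstancesInput{
		SpotFleetRequestId: aws.String(fleetID), // the only required field
		MaxResults:         aws.Int64(1000),     // 1 to 1000, default 1000
	}
	for {
		resp, err := svc.DescribeSpotFleetInstances(input)
		if err != nil {
			return nil, err
		}
		for _, inst := range resp.ActiveInstances {
			ids = append(ids, aws.StringValue(inst.InstanceId))
		}
		if resp.NextToken == nil {
			return ids, nil
		}
		input.NextToken = resp.NextToken
	}
}
```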
+type DescribeSpotFleetInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + metadataDescribeSpotFleetInstancesInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesOutput struct { + // The running instances. Note that this list is refreshed periodically and + // might be out of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + metadataDescribeSpotFleetInstancesOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. 
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + metadataDescribeSpotFleetRequestHistoryInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetRequestHistoryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryOutput struct { + // Information about the events in the history of the Spot fleet request. + HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list" required:"true"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + metadataDescribeSpotFleetRequestHistoryOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetRequestHistoryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of the Spot fleet requests. 
+ SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"` + + metadataDescribeSpotFleetRequestsInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetRequestsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsOutput struct { + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the configuration of your Spot fleet. + SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list" required:"true"` + + metadataDescribeSpotFleetRequestsOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotFleetRequestsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone-group - The Availability Zone group. + // + // create-time - The time stamp when the Spot instance request was created. + // + // fault-code - The fault code related to the request. + // + // fault-message - The fault message related to the request. + // + // instance-id - The ID of the instance that fulfilled the request. + // + // launch-group - The Spot instance launch group. + // + // launch.block-device-mapping.delete-on-termination - Indicates whether + // the Amazon EBS volume is deleted on instance termination. + // + // launch.block-device-mapping.device-name - The device name for the Amazon + // EBS volume (for example, /dev/sdh). + // + // launch.block-device-mapping.snapshot-id - The ID of the snapshot used + // for the Amazon EBS volume. + // + // launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, + // in GiB. + // + // launch.block-device-mapping.volume-type - The type of the Amazon EBS volume + // (gp2 | standard | io1). + // + // launch.group-id - The security group for the instance. + // + // launch.image-id - The ID of the AMI. + // + // launch.instance-type - The type of instance (for example, m3.medium). + // + // launch.kernel-id - The kernel ID. + // + // launch.key-name - The name of the key pair the instance launched with. + // + // launch.monitoring-enabled - Whether monitoring is enabled for the Spot + // instance. + // + // launch.ramdisk-id - The RAM disk ID. + // + // network-interface.network-interface-id - The ID of the network interface. 
+ // + // network-interface.device-index - The index of the device for the network + // interface attachment on the instance. + // + // network-interface.subnet-id - The ID of the subnet for the instance. + // + // network-interface.description - A description of the network interface. + // + // network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. + // + // network-interface.group-id - The ID of the security group associated with + // the network interface. + // + // network-interface.group-name - The name of the security group associated + // with the network interface. + // + // network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. + // + // product-description - The product description associated with the instance + // (Linux/UNIX | Windows). + // + // spot-instance-request-id - The Spot instance request ID. + // + // spot-price - The maximum hourly price for any Spot instance launched to + // fulfill the request. + // + // state - The state of the Spot instance request (open | active | closed + // | cancelled | failed). Spot bid status information can help you track your + // Amazon EC2 Spot instance requests. For more information, see Spot Bid Status + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // status-code - The short code describing the most recent evaluation of + // your Spot instance request. + // + // status-message - The message explaining the status of the Spot instance + // request. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of Spot instance request (one-time | persistent). + // + // launched-availability-zone - The Availability Zone in which the bid is + // launched. + // + // valid-from - The start date of the request. + // + // valid-until - The end date of the request. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` + + metadataDescribeSpotInstanceRequestsInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotInstanceRequestsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotInstanceRequests. 
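The filter names listed above map onto ec2.Filter values at call time. A sketch of querying only requests in the "active" state, assuming the upstream import path, the aws.String/aws.StringValue helpers, an *ec2.EC2 client named svc, and the SpotInstanceRequest fields (SpotInstanceRequestId, InstanceId) defined elsewhere in this file:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// printActiveSpotRequests lists Spot instance requests in the "active" state,
// built from the state filter documented above.
func printActiveSpotRequests(svc *ec2.EC2) error {
	resp, err := svc.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name:   aws.String("state"),
				Values: []*string{aws.String("active")},
			},
		},
	})
	if err != nil {
		return err
	}
	for _, req := range resp.SpotInstanceRequests {
		fmt.Printf("%s fulfilled by %s\n",
			aws.StringValue(req.SpotInstanceRequestId),
			aws.StringValue(req.InstanceId))
	}
	return nil
}
```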
+type DescribeSpotInstanceRequestsOutput struct { + // One or more Spot instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` + + metadataDescribeSpotInstanceRequestsOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotInstanceRequestsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryInput struct { + // Filters the results by the specified Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The date and time, up to the current date, from which to stop retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more filters. + // + // availability-zone - The Availability Zone for which prices should be returned. + // + // instance-type - The type of instance (for example, m3.medium). + // + // product-description - The product description for the Spot price (Linux/UNIX + // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). + // + // spot-price - The Spot price. The value must match exactly (or use wildcards; + // greater than or less than comparison is not supported). + // + // timestamp - The timestamp of the Spot price history, in UTC format (for + // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater + // than or less than comparison is not supported. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Filters the results by the specified instance types. + InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the results by the specified basic product descriptions. + ProductDescriptions []*string `locationName:"ProductDescription" type:"list"` + + // The date and time, up to the past 90 days, from which to start retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + metadataDescribeSpotPriceHistoryInput `json:"-" xml:"-"` +} + +type metadataDescribeSpotPriceHistoryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryOutput struct { + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The historical Spot prices. + SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"` + + metadataDescribeSpotPriceHistoryOutput `json:"-" xml:"-"` +} + +type metadataDescribeSpotPriceHistoryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryOutput) GoString() string { + return s.String() +} + +type DescribeSubnetsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availabilityZone - The Availability Zone for the subnet. You can also + // use availability-zone as the filter name. + // + // available-ip-address-count - The number of IP addresses in the subnet + // that are available. + // + // cidrBlock - The CIDR block of the subnet. The CIDR block you specify must + // exactly match the subnet's CIDR block for information to be returned for + // the subnet. You can also use cidr or cidr-block as the filter names. + // + // defaultForAz - Indicates whether this is the default subnet for the Availability + // Zone. You can also use default-for-az as the filter name. + // + // state - The state of the subnet (pending | available). + // + // subnet-id - The ID of the subnet. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the subnet. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more subnet IDs. + // + // Default: Describes all your subnets. 
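Rather than enumerating explicit subnet IDs (the field below), the vpc-id filter listed above is often enough on its own. A sketch under the same kind of assumptions (upstream import path, aws helper functions, a caller-supplied *ec2.EC2 client); the Subnet field names come from the Subnet type defined elsewhere in this file:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// subnetsInVPC describes every subnet in one VPC via the vpc-id filter,
// instead of passing subnet IDs up front.
func subnetsInVPC(svc *ec2.EC2, vpcID string) error {
	resp, err := svc.DescribeSubnets(&ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name:   aws.String("vpc-id"),
				Values: []*string{aws.String(vpcID)},
			},
		},
	})
	if err != nil {
		return err
	}
	for _, s := range resp.Subnets {
		fmt.Printf("%s  %s  %s\n",
			aws.StringValue(s.SubnetId),
			aws.StringValue(s.CidrBlock),
			aws.StringValue(s.AvailabilityZone))
	}
	return nil
}
```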
+ SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"` + + metadataDescribeSubnetsInput `json:"-" xml:"-"` +} + +type metadataDescribeSubnetsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsInput) GoString() string { + return s.String() +} + +type DescribeSubnetsOutput struct { + // Information about one or more subnets. + Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` + + metadataDescribeSubnetsOutput `json:"-" xml:"-"` +} + +type metadataDescribeSubnetsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // key - The tag key. + // + // resource-id - The resource ID. + // + // resource-type - The resource type (customer-gateway | dhcp-options | image + // | instance | internet-gateway | network-acl | network-interface | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpn-connection | vpn-gateway). + // + // value - The tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + metadataDescribeTagsInput `json:"-" xml:"-"` +} + +type metadataDescribeTagsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return.. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of tags. 
+ Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"` + + metadataDescribeTagsOutput `json:"-" xml:"-"` +} + +type metadataDescribeTagsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DescribeVolumeAttributeInput struct { + // The instance attribute. + Attribute *string `type:"string" enum:"VolumeAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` + + metadataDescribeVolumeAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeVolumeAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeInput) GoString() string { + return s.String() +} + +type DescribeVolumeAttributeOutput struct { + // The state of autoEnableIO attribute. + AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + metadataDescribeVolumeAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeVolumeAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeOutput) GoString() string { + return s.String() +} + +type DescribeVolumeStatusInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // action.code - The action code for the event (for example, enable-volume-io). + // + // action.description - A description of the action. + // + // action.event-id - The event ID associated with the action. + // + // availability-zone - The Availability Zone of the instance. + // + // event.description - A description of the event. + // + // event.event-id - The event ID. + // + // event.event-type - The event type (for io-enabled: passed | failed; for + // io-performance: io-performance:degraded | io-performance:severely-degraded + // | io-performance:stalled). + // + // event.not-after - The latest end time for the event. + // + // event.not-before - The earliest start time for the event. + // + // volume-status.details-name - The cause for volume-status.status (io-enabled + // | io-performance). 
+ // + // volume-status.details-status - The status of volume-status.details-name + // (for io-enabled: passed | failed; for io-performance: normal | degraded | + // severely-degraded | stalled). + // + // volume-status.status - The status of the volume (ok | impaired | warning + // | insufficient-data). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumeStatus in + // paginated output. When this parameter is used, the request only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another request with + // the returned NextToken value. This value can be between 5 and 1000; if MaxResults + // is given a value larger than 1000, only 1000 results are returned. If this + // parameter is not used, then DescribeVolumeStatus returns all results. You + // cannot specify this parameter and the volume IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value to include in a future DescribeVolumeStatus request. + // When the results of the request exceed MaxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `type:"string"` + + // One or more volume IDs. + // + // Default: Describes all your volumes. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` + + metadataDescribeVolumeStatusInput `json:"-" xml:"-"` +} + +type metadataDescribeVolumeStatusInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumeStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusInput) GoString() string { + return s.String() +} + +type DescribeVolumeStatusOutput struct { + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of volumes. + VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"` + + metadataDescribeVolumeStatusOutput `json:"-" xml:"-"` +} + +type metadataDescribeVolumeStatusOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumeStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusOutput) GoString() string { + return s.String() +} + +type DescribeVolumesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.attach-time - The time stamp when the attachment initiated. + // + // attachment.delete-on-termination - Whether the volume is deleted on instance + // termination. + // + // attachment.device - The device name that is exposed to the instance (for + // example, /dev/sda1). + // + // attachment.instance-id - The ID of the instance the volume is attached + // to. 
+ // + // attachment.status - The attachment state (attaching | attached | detaching + // | detached). + // + // availability-zone - The Availability Zone in which the volume was created. + // + // create-time - The time stamp when the volume was created. + // + // encrypted - The encryption status of the volume. + // + // size - The size of the volume, in GiB. + // + // snapshot-id - The snapshot from which the volume was created. + // + // status - The status of the volume (creating | available | in-use | deleting + // | deleted | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // volume-id - The volume ID. + // + // volume-type - The Amazon EBS volume type. This can be gp2 for General + // Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard + // for Magnetic volumes. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumes in paginated + // output. When this parameter is used, DescribeVolumes only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeVolumes + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeVolumes returns + // all results. You cannot specify this parameter and the volume IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The NextToken value returned from a previous paginated DescribeVolumes request + // where MaxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // NextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more volume IDs. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` + + metadataDescribeVolumesInput `json:"-" xml:"-"` +} + +type metadataDescribeVolumesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesInput) GoString() string { + return s.String() +} + +type DescribeVolumesOutput struct { + // The NextToken value to include in a future DescribeVolumes request. When + // the results of a DescribeVolumes request exceed MaxResults, this value can + // be used to retrieve the next page of results. This value is null when there + // are no more results to return. 
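The DryRun flag described above never returns volume data; the interesting part is the error code, which (per the comment) is DryRunOperation when the caller has permission and UnauthorizedOperation when it does not. A permission-probe sketch, assuming the upstream import path, the aws.Bool helper, the awserr package from the same SDK, and a caller-supplied *ec2.EC2 client:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// canDescribeVolumes probes the ec2:DescribeVolumes permission with DryRun.
// The call is never executed for real; the error code carries the answer.
func canDescribeVolumes(svc *ec2.EC2) (bool, error) {
	_, err := svc.DescribeVolumes(&ec2.DescribeVolumesInput{
		DryRun: aws.Bool(true),
	})
	if err == nil {
		// Not expected with DryRun set, but treat success as permission granted.
		return true, nil
	}
	if awsErr, ok := err.(awserr.Error); ok {
		switch awsErr.Code() {
		case "DryRunOperation":
			return true, nil // the real call would have been allowed
		case "UnauthorizedOperation":
			return false, nil // permission is missing
		}
	}
	return false, err
}
```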
+ NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volumes. + Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"` + + metadataDescribeVolumesOutput `json:"-" xml:"-"` +} + +type metadataDescribeVolumesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeInput struct { + // The VPC attribute. + Attribute *string `type:"string" enum:"VpcAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + metadataDescribeVpcAttributeInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeInput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeOutput struct { + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // this attribute is true, instances in the VPC get DNS hostnames; otherwise, + // they do not. + EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"` + + // Indicates whether DNS resolution is enabled for the VPC. If this attribute + // is true, the Amazon DNS server resolves DNS hostnames for your instances + // to their corresponding IP addresses; otherwise, it does not. + EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataDescribeVpcAttributeOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true + // | false). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
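//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): reading a single VPC attribute with DescribeVpcAttribute as
// documented above. The attribute name and VPC ID are placeholders; `svc` is
// an assumed *EC2 client from this package.
//
//    resp, err := svc.DescribeVpcAttribute(&ec2.DescribeVpcAttributeInput{
//        Attribute: aws.String("enableDnsSupport"),
//        VpcId:     aws.String("vpc-11aa22bb"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    if resp.EnableDnsSupport != nil && resp.EnableDnsSupport.Value != nil {
//        fmt.Println("DNS resolution enabled:", *resp.EnableDnsSupport.Value)
//    }
//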
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPCs for which you want to describe the ClassicLink status. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` + + metadataDescribeVpcClassicLinkInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcClassicLinkInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkOutput struct { + // The ClassicLink status of one or more VPCs. + Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"` + + metadataDescribeVpcClassicLinkOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcClassicLinkOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + metadataDescribeVpcEndpointServicesInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcEndpointServicesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesOutput struct { + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of supported AWS services. 
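//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): listing the service names that support VPC endpoints, using the
// MaxResults/NextToken behaviour documented above (an empty token means the
// last page). `svc` is an assumed *EC2 client from this package.
//
//    params := &ec2.DescribeVpcEndpointServicesInput{MaxResults: aws.Int64(100)}
//    for {
//        resp, err := svc.DescribeVpcEndpointServices(params)
//        if err != nil {
//            log.Fatal(err)
//        }
//        for _, name := range resp.ServiceNames {
//            fmt.Println(*name)
//        }
//        if resp.NextToken == nil || *resp.NextToken == "" {
//            break
//        }
//        params.NextToken = resp.NextToken
//    }
//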
+ ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"` + + metadataDescribeVpcEndpointServicesOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcEndpointServicesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // service-name: The name of the AWS service. + // + // vpc-id: The ID of the VPC in which the endpoint resides. + // + // vpc-endpoint-id: The ID of the endpoint. + // + // vpc-endpoint-state: The state of the endpoint. (pending | available | + // deleting | deleted) + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"` + + metadataDescribeVpcEndpointsInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcEndpointsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsOutput struct { + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the endpoints. + VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` + + metadataDescribeVpcEndpointsOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcEndpointsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. 
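//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): using the endpoint filters documented above to list only the
// available endpoints in one VPC. The VPC ID is a placeholder, `svc` is an
// assumed *EC2 client from this package, and the Filter type is defined later
// in this file.
//
//    resp, err := svc.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{
//        Filters: []*ec2.Filter{
//            {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-11aa22bb")}},
//            {Name: aws.String("vpc-endpoint-state"), Values: []*string{aws.String("available")}},
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, ep := range resp.VpcEndpoints {
//        fmt.Println(*ep.VpcEndpointId)
//    }
//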
+ // + // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer + // VPC. + // + // accepter-vpc-info.vpc-id - The ID of the peer VPC. + // + // expiration-time - The expiration date and time for the VPC peering connection. + // + // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. + // + // requester-vpc-info.owner-id - The AWS account ID of the owner of the requester + // VPC. + // + // requester-vpc-info.vpc-id - The ID of the requester VPC. + // + // status-code - The status of the VPC peering connection (pending-acceptance + // | failed | expired | provisioning | active | deleted | rejected). + // + // status-message - A message that provides more information about the status + // of the VPC peering connection, if applicable. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-peering-connection-id - The ID of the VPC peering connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC peering connection IDs. + // + // Default: Describes all your VPC peering connections. + VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"` + + metadataDescribeVpcPeeringConnectionsInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcPeeringConnectionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsOutput struct { + // Information about the VPC peering connections. + VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"` + + metadataDescribeVpcPeeringConnectionsOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcPeeringConnectionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpcsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly + // match the VPC's CIDR block for information to be returned for the VPC. 
Must + // contain the slash followed by one or two digits (for example, /28). + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // isDefault - Indicates whether the VPC is the default VPC. + // + // state - The state of the VPC (pending | available). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC IDs. + // + // Default: Describes all your VPCs. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` + + metadataDescribeVpcsInput `json:"-" xml:"-"` +} + +type metadataDescribeVpcsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsInput) GoString() string { + return s.String() +} + +type DescribeVpcsOutput struct { + // Information about one or more VPCs. + Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` + + metadataDescribeVpcsOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpcsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpcsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsOutput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // customer-gateway-configuration - The configuration information for the + // customer gateway. + // + // customer-gateway-id - The ID of a customer gateway associated with the + // VPN connection. + // + // state - The state of the VPN connection (pending | available | deleting + // | deleted). + // + // option.static-routes-only - Indicates whether the connection has static + // routes only. Used for devices that do not support Border Gateway Protocol + // (BGP). + // + // route.destination-cidr-block - The destination CIDR block. This corresponds + // to the subnet used in a customer data center. + // + // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // device. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
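//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): the difference between the tag:key=value filter and the tag-key
// filter described above, applied to DescribeVpcs. The tag key and value are
// placeholders; `svc` is an assumed *EC2 client from this package.
//
//    // Only VPCs whose tag "Purpose" has the value "X":
//    withValue := &ec2.DescribeVpcsInput{Filters: []*ec2.Filter{
//        {Name: aws.String("tag:Purpose"), Values: []*string{aws.String("X")}},
//    }}
//    // Any VPC carrying a "Purpose" tag, whatever its value:
//    withKey := &ec2.DescribeVpcsInput{Filters: []*ec2.Filter{
//        {Name: aws.String("tag-key"), Values: []*string{aws.String("Purpose")}},
//    }}
//    for _, params := range []*ec2.DescribeVpcsInput{withValue, withKey} {
//        resp, err := svc.DescribeVpcs(params)
//        if err != nil {
//            log.Fatal(err)
//        }
//        fmt.Println(len(resp.Vpcs), "matching VPCs")
//    }
//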
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of VPN connection. Currently the only supported type is + // ipsec.1. + // + // vpn-connection-id - The ID of the VPN connection. + // + // vpn-gateway-id - The ID of a virtual private gateway associated with the + // VPN connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPN connection IDs. + // + // Default: Describes your VPN connections. + VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"` + + metadataDescribeVpnConnectionsInput `json:"-" xml:"-"` +} + +type metadataDescribeVpnConnectionsInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsOutput struct { + // Information about one or more VPN connections. + VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"` + + metadataDescribeVpnConnectionsOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpnConnectionsOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (attaching | attached | detaching | detached). + // + // attachment.vpc-id - The ID of an attached VPC. + // + // availability-zone - The Availability Zone for the virtual private gateway. + // + // state - The state of the virtual private gateway (pending | available + // | deleting | deleted). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. 
+ // + // type - The type of virtual private gateway. Currently the only supported + // type is ipsec.1. + // + // vpn-gateway-id - The ID of the virtual private gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more virtual private gateway IDs. + // + // Default: Describes all your virtual private gateways. + VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"` + + metadataDescribeVpnGatewaysInput `json:"-" xml:"-"` +} + +type metadataDescribeVpnGatewaysInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysInput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysOutput struct { + // Information about one or more virtual private gateways. + VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"` + + metadataDescribeVpnGatewaysOutput `json:"-" xml:"-"` +} + +type metadataDescribeVpnGatewaysOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysOutput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to unlink from the VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the VPC to which the instance is linked. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataDetachClassicLinkVpcInput `json:"-" xml:"-"` +} + +type metadataDetachClassicLinkVpcInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataDetachClassicLinkVpcOutput `json:"-" xml:"-"` +} + +type metadataDetachClassicLinkVpcOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type DetachInternetGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. 
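//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): detaching an Internet gateway takes both required fields below, the
// gateway ID and the VPC it is attached to (placeholders here); `svc` is an
// assumed *EC2 client from this package.
//
//    _, err := svc.DetachInternetGateway(&ec2.DetachInternetGatewayInput{
//        InternetGatewayId: aws.String("igw-1a2b3c4d"),
//        VpcId:             aws.String("vpc-11aa22bb"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//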
+ InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataDetachInternetGatewayInput `json:"-" xml:"-"` +} + +type metadataDetachInternetGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayInput) GoString() string { + return s.String() +} + +type DetachInternetGatewayOutput struct { + metadataDetachInternetGatewayOutput `json:"-" xml:"-"` +} + +type metadataDetachInternetGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayOutput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceInput struct { + // The ID of the attachment. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether to force a detachment. + Force *bool `locationName:"force" type:"boolean"` + + metadataDetachNetworkInterfaceInput `json:"-" xml:"-"` +} + +type metadataDetachNetworkInterfaceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceOutput struct { + metadataDetachNetworkInterfaceOutput `json:"-" xml:"-"` +} + +type metadataDetachNetworkInterfaceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DetachVolumeInput struct { + // The device name. + Device *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces detachment if the previous detachment attempt did not occur cleanly + // (for example, logging into an instance, unmounting the volume, and detaching + // normally). This option can lead to data loss or a corrupted file system. + // Use this option only as a last resort to detach a volume from a failed instance. + // The instance won't have an opportunity to flush file system caches or file + // system metadata. If you use this option, you must perform file system check + // and repair procedures. + Force *bool `type:"boolean"` + + // The ID of the instance. 
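//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): a normal detach names only the volume; Force is left commented out
// because, per the description above, it risks data loss and is a last resort
// for failed instances. The volume ID is a placeholder and `svc` is an assumed
// *EC2 client from this package.
//
//    resp, err := svc.DetachVolume(&ec2.DetachVolumeInput{
//        VolumeId: aws.String("vol-1a2b3c4d"),
//        // Force: aws.Bool(true), // last resort only; run a file system check afterwards
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(resp) // the resulting volume attachment description
//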
+ InstanceId *string `type:"string"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` + + metadataDetachVolumeInput `json:"-" xml:"-"` +} + +type metadataDetachVolumeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVolumeInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` + + metadataDetachVpnGatewayInput `json:"-" xml:"-"` +} + +type metadataDetachVpnGatewayInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayOutput struct { + metadataDetachVpnGatewayOutput `json:"-" xml:"-"` +} + +type metadataDetachVpnGatewayOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a DHCP configuration option. +type DhcpConfiguration struct { + // The name of a DHCP option. + Key *string `locationName:"key" type:"string"` + + // One or more values for the DHCP option. + Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"` + + metadataDhcpConfiguration `json:"-" xml:"-"` +} + +type metadataDhcpConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpConfiguration) GoString() string { + return s.String() +} + +// Describes a set of DHCP options. +type DhcpOptions struct { + // One or more DHCP options in the set. + DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // Any tags assigned to the DHCP options set. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + metadataDhcpOptions `json:"-" xml:"-"` +} + +type metadataDhcpOptions struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DhcpOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpOptions) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationInput struct { + // The ID of the virtual private gateway. 
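//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): stopping a virtual private gateway from propagating routes into a
// route table takes the two required IDs below (placeholders here); `svc` is
// an assumed *EC2 client from this package.
//
//    _, err := svc.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{
//        GatewayId:    aws.String("vgw-1a2b3c4d"),
//        RouteTableId: aws.String("rtb-1a2b3c4d"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//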
+ GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + RouteTableId *string `type:"string" required:"true"` + + metadataDisableVgwRoutePropagationInput `json:"-" xml:"-"` +} + +type metadataDisableVgwRoutePropagationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationOutput struct { + metadataDisableVgwRoutePropagationOutput `json:"-" xml:"-"` +} + +type metadataDisableVgwRoutePropagationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataDisableVpcClassicLinkInput `json:"-" xml:"-"` +} + +type metadataDisableVpcClassicLinkInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataDisableVpcClassicLinkOutput `json:"-" xml:"-"` +} + +type metadataDisableVpcClassicLinkOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DisassociateAddressInput struct { + // [EC2-VPC] The association ID. Required for EC2-VPC. + AssociationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. 
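//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): as documented above, an EC2-VPC disassociation is identified by its
// association ID, while EC2-Classic uses the Elastic IP address itself. Both
// identifiers are placeholders; `svc` is an assumed *EC2 client from this
// package.
//
//    // EC2-VPC:
//    _, err := svc.DisassociateAddress(&ec2.DisassociateAddressInput{
//        AssociationId: aws.String("eipassoc-1a2b3c4d"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // EC2-Classic:
//    _, err = svc.DisassociateAddress(&ec2.DisassociateAddressInput{
//        PublicIp: aws.String("198.51.100.7"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//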
+ PublicIp *string `type:"string"` + + metadataDisassociateAddressInput `json:"-" xml:"-"` +} + +type metadataDisassociateAddressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressInput) GoString() string { + return s.String() +} + +type DisassociateAddressOutput struct { + metadataDisassociateAddressOutput `json:"-" xml:"-"` +} + +type metadataDisassociateAddressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressOutput) GoString() string { + return s.String() +} + +type DisassociateRouteTableInput struct { + // The association ID representing the current association between the route + // table and subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + metadataDisassociateRouteTableInput `json:"-" xml:"-"` +} + +type metadataDisassociateRouteTableInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableInput) GoString() string { + return s.String() +} + +type DisassociateRouteTableOutput struct { + metadataDisassociateRouteTableOutput `json:"-" xml:"-"` +} + +type metadataDisassociateRouteTableOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableOutput) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImage struct { + // A description of the disk image. + Description *string `type:"string"` + + // Information about the disk image. + Image *DiskImageDetail `type:"structure"` + + // Information about the volume. + Volume *VolumeDetail `type:"structure"` + + metadataDiskImage `json:"-" xml:"-"` +} + +type metadataDiskImage struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DiskImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImage) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDescription struct { + // The checksum computed for the disk image. + Checksum *string `locationName:"checksum" type:"string"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3. 
For information + // about creating a presigned URL for an Amazon S3 object, read the "Query String + // Request Authentication Alternative" section of the Authenticating REST Requests + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + + // The size of the disk image, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` + + metadataDiskImageDescription `json:"-" xml:"-"` +} + +type metadataDiskImageDescription struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DiskImageDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDescription) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDetail struct { + // The size of the disk image, in GiB. + Bytes *int64 `locationName:"bytes" type:"long" required:"true"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3 and presented + // here as an Amazon S3 presigned URL. For information about creating a presigned + // URL for an Amazon S3 object, read the "Query String Request Authentication + // Alternative" section of the Authenticating REST Requests (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + + metadataDiskImageDetail `json:"-" xml:"-"` +} + +type metadataDiskImageDetail struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DiskImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDetail) GoString() string { + return s.String() +} + +// Describes a disk image volume. +type DiskImageVolumeDescription struct { + // The volume identifier. + Id *string `locationName:"id" type:"string" required:"true"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long"` + + metadataDiskImageVolumeDescription `json:"-" xml:"-"` +} + +type metadataDiskImageVolumeDescription struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s DiskImageVolumeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageVolumeDescription) GoString() string { + return s.String() +} + +// Describes a block device for an EBS volume. +type EbsBlockDevice struct { + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. 
For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The size of the volume, in GiB. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` + + metadataEbsBlockDevice `json:"-" xml:"-"` +} + +type metadataEbsBlockDevice struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a parameter used to set up an EBS volume in a block device mapping. +type EbsInstanceBlockDevice struct { + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + metadataEbsInstanceBlockDevice `json:"-" xml:"-"` +} + +type metadataEbsInstanceBlockDevice struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EbsInstanceBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDevice) GoString() string { + return s.String() +} + +type EbsInstanceBlockDeviceSpecification struct { + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The ID of the EBS volume. 
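//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): an EbsBlockDevice for a Provisioned IOPS volume, wrapped in the
// BlockDeviceMapping type defined earlier in this file. The device name and
// sizes are placeholders; note that Iops is required for io1 volumes and
// omitted for standard or gp2 volumes, as described above.
//
//    mapping := &ec2.BlockDeviceMapping{
//        DeviceName: aws.String("/dev/sdf"),
//        Ebs: &ec2.EbsBlockDevice{
//            VolumeType:          aws.String("io1"),
//            VolumeSize:          aws.Int64(100),  // GiB; io1 allows 4-16384
//            Iops:                aws.Int64(1000), // required for io1 only
//            DeleteOnTermination: aws.Bool(true),
//            Encrypted:           aws.Bool(true),
//        },
//    }
//    _ = mapping // typically passed in the block device mappings of a launch request
//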
+ VolumeId *string `locationName:"volumeId" type:"string"` + + metadataEbsInstanceBlockDeviceSpecification `json:"-" xml:"-"` +} + +type metadataEbsInstanceBlockDeviceSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EbsInstanceBlockDeviceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDeviceSpecification) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationInput struct { + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + RouteTableId *string `type:"string" required:"true"` + + metadataEnableVgwRoutePropagationInput `json:"-" xml:"-"` +} + +type metadataEnableVgwRoutePropagationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationOutput struct { + metadataEnableVgwRoutePropagationOutput `json:"-" xml:"-"` +} + +type metadataEnableVgwRoutePropagationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type EnableVolumeIOInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string" required:"true"` + + metadataEnableVolumeIOInput `json:"-" xml:"-"` +} + +type metadataEnableVolumeIOInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOInput) GoString() string { + return s.String() +} + +type EnableVolumeIOOutput struct { + metadataEnableVolumeIOOutput `json:"-" xml:"-"` +} + +type metadataEnableVolumeIOOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
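//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): enabling ClassicLink on a VPC and checking the Return flag
// documented on the output type below. The VPC ID is a placeholder; `svc` is
// an assumed *EC2 client from this package.
//
//    resp, err := svc.EnableVpcClassicLink(&ec2.EnableVpcClassicLinkInput{
//        VpcId: aws.String("vpc-11aa22bb"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    if resp.Return != nil && *resp.Return {
//        fmt.Println("ClassicLink enabled")
//    }
//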
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataEnableVpcClassicLinkInput `json:"-" xml:"-"` +} + +type metadataEnableVpcClassicLinkInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataEnableVpcClassicLinkOutput `json:"-" xml:"-"` +} + +type metadataEnableVpcClassicLinkOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet event. +type EventInformation struct { + // The description of the event. + EventDescription *string `locationName:"eventDescription" type:"string"` + + // The event. + // + // The following are the error events. + // + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions + // either to launch or terminate an instance. + // + // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, see + // the description of the event. + // + // spotFleetRequestConfigurationInvalid - The configuration is not valid. + // For more information, see the description of the event. + // + // spotInstanceCountLimitExceeded - You've reached the limit on the number + // of Spot instances that you can launch. + // + // The following are the fleetRequestChange events. + // + // active - The Spot fleet has been validated and Amazon EC2 is attempting + // to maintain the target number of running Spot instances. + // + // cancelled - The Spot fleet is canceled and has no running Spot instances. + // The Spot fleet will be deleted two days after its instances were terminated. + // + // cancelled_running - The Spot fleet is canceled and will not launch additional + // Spot instances, but its existing Spot instances continue to run until they + // are interrupted or terminated. + // + // cancelled_terminating - The Spot fleet is canceled and its Spot instances + // are terminating. + // + // expired - The Spot fleet request has expired. A subsequent event indicates + // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration + // set. + // + // modify_in_progress - A request to modify the Spot fleet request was accepted + // and is in progress. + // + // modify_successful - The Spot fleet request was modified. + // + // price_update - The bid price for a launch configuration was adjusted because + // it was too high. This change is permanent. + // + // submitted - The Spot fleet request is being evaluated and Amazon EC2 is + // preparing to launch the target number of Spot instances. + // + // The following are the instanceChange events. + // + // launched - A bid was fulfilled and a new instance was launched. + // + // terminated - An instance was terminated by the user. + EventSubType *string `locationName:"eventSubType" type:"string"` + + // The ID of the instance. 
This information is available only for instanceChange + // events. + InstanceId *string `locationName:"instanceId" type:"string"` + + metadataEventInformation `json:"-" xml:"-"` +} + +type metadataEventInformation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s EventInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventInformation) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportTask struct { + // A description of the resource being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export task. + ExportTaskId *string `locationName:"exportTaskId" type:"string"` + + // Information about the export task. + ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"` + + // Information about the instance to export. + InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"` + + // The state of the export task. + State *string `locationName:"state" type:"string" enum:"ExportTaskState"` + + // The status message related to the export task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + metadataExportTask `json:"-" xml:"-"` +} + +type metadataExportTask struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// Describes the format and location for an instance export task. +type ExportToS3Task struct { + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The encryption key for your S3 bucket. + S3Key *string `locationName:"s3Key" type:"string"` + + metadataExportToS3Task `json:"-" xml:"-"` +} + +type metadataExportToS3Task struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ExportToS3Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3Task) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportToS3TaskSpecification struct { + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. 
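//
// Example (editor's illustrative sketch, not part of the upstream aws-sdk-go
// source): an export-to-S3 specification. The bucket name and prefix are
// placeholders; the bucket must grant the permissions described above, and the
// spec is normally supplied with a CreateInstanceExportTask request (defined
// earlier in this file).
//
//    spec := &ec2.ExportToS3TaskSpecification{
//        ContainerFormat: aws.String("ova"),
//        DiskImageFormat: aws.String("vmdk"),
//        S3Bucket:        aws.String("my-export-bucket"),
//        S3Prefix:        aws.String("exports/"),
//    }
//    _ = spec
//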
+ S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The image is written to a single object in the S3 bucket at the S3 key s3prefix + // + exportTaskId + '.' + diskImageFormat. + S3Prefix *string `locationName:"s3Prefix" type:"string"` + + metadataExportToS3TaskSpecification `json:"-" xml:"-"` +} + +type metadataExportToS3TaskSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ExportToS3TaskSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3TaskSpecification) GoString() string { + return s.String() +} + +// A filter name and value pair that is used to return a more specific list +// of results. Filters can be used to match a set of resources by various criteria, +// such as tags, attributes, or IDs. +type Filter struct { + // The name of the filter. Filter names are case-sensitive. + Name *string `type:"string"` + + // One or more filter values. Filter values are case-sensitive. + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` + + metadataFilter `json:"-" xml:"-"` +} + +type metadataFilter struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes a flow log. +type FlowLog struct { + // The date and time the flow log was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Information about the error that occurred. Rate limited indicates that CloudWatch + // logs throttling has been applied for one or more network interfaces. Access + // error indicates that the IAM role associated with the flow log does not have + // sufficient permissions to publish to CloudWatch Logs. Unknown error indicates + // an internal error. + DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"` + + // The ARN of the IAM role that posts logs to CloudWatch Logs. + DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"` + + // The status of the logs delivery (SUCCESS | FAILED). + DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"` + + // The flow log ID. + FlowLogId *string `locationName:"flowLogId" type:"string"` + + // The status of the flow log (ACTIVE). + FlowLogStatus *string `locationName:"flowLogStatus" type:"string"` + + // The name of the flow log group. + LogGroupName *string `locationName:"logGroupName" type:"string"` + + // The ID of the resource on which the flow log was created. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of traffic captured for the flow log. + TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"` + + metadataFlowLog `json:"-" xml:"-"` +} + +type metadataFlowLog struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s FlowLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlowLog) GoString() string { + return s.String() +} + +type GetConsoleOutputInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + metadataGetConsoleOutputInput `json:"-" xml:"-"` +} + +type metadataGetConsoleOutputInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetConsoleOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputInput) GoString() string { + return s.String() +} + +type GetConsoleOutputOutput struct { + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The console output, Base64 encoded. + Output *string `locationName:"output" type:"string"` + + // The time the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + + metadataGetConsoleOutputOutput `json:"-" xml:"-"` +} + +type metadataGetConsoleOutputOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetConsoleOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputOutput) GoString() string { + return s.String() +} + +type GetPasswordDataInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Windows instance. + InstanceId *string `type:"string" required:"true"` + + metadataGetPasswordDataInput `json:"-" xml:"-"` +} + +type metadataGetPasswordDataInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetPasswordDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataInput) GoString() string { + return s.String() +} + +type GetPasswordDataOutput struct { + // The ID of the Windows instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The password of the instance. + PasswordData *string `locationName:"passwordData" type:"string"` + + // The time the data was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + + metadataGetPasswordDataOutput `json:"-" xml:"-"` +} + +type metadataGetPasswordDataOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GetPasswordDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataOutput) GoString() string { + return s.String() +} + +// Describes a security group. +type GroupIdentifier struct { + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. 
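Editorial aside, not part of the vendored diff: the GetConsoleOutput request/response pair above is typically used as sketched below. The helper takes an already-configured *ec2.EC2 client, because client construction differs between aws-sdk-go releases and is not shown in this diff; the assumed imports are encoding/base64, fmt, and the SDK's aws and ec2 packages. Instance ID is a placeholder.

// Sketch only: fetch and decode an instance's console log using the
// GetConsoleOutputInput/Output types defined above.
func dumpConsoleOutput(svc *ec2.EC2, instanceID string) error {
	out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
		InstanceId: aws.String(instanceID), // hypothetical instance ID
	})
	if err != nil {
		return err
	}
	if out.Output == nil {
		return nil // nothing logged yet
	}
	// The Output field is Base64 encoded, per its doc comment above.
	text, err := base64.StdEncoding.DecodeString(*out.Output)
	if err != nil {
		return err
	}
	fmt.Printf("%s\n", text)
	return nil
}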
+ GroupName *string `locationName:"groupName" type:"string"` + + metadataGroupIdentifier `json:"-" xml:"-"` +} + +type metadataGroupIdentifier struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s GroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupIdentifier) GoString() string { + return s.String() +} + +// Describes an event in the history of the Spot fleet request. +type HistoryRecord struct { + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure" required:"true"` + + // The event type. + // + // error - Indicates an error with the Spot fleet request. + // + // fleetRequestChange - Indicates a change in the status or configuration + // of the Spot fleet request. + // + // instanceChange - Indicates that an instance was launched or terminated. + EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + metadataHistoryRecord `json:"-" xml:"-"` +} + +type metadataHistoryRecord struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s HistoryRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecord) GoString() string { + return s.String() +} + +// Describes an IAM instance profile. +type IamInstanceProfile struct { + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `locationName:"arn" type:"string"` + + // The ID of the instance profile. + Id *string `locationName:"id" type:"string"` + + metadataIamInstanceProfile `json:"-" xml:"-"` +} + +type metadataIamInstanceProfile struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s IamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfile) GoString() string { + return s.String() +} + +// Describes an IAM instance profile. +type IamInstanceProfileSpecification struct { + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `locationName:"arn" type:"string"` + + // The name of the instance profile. + Name *string `locationName:"name" type:"string"` + + metadataIamInstanceProfileSpecification `json:"-" xml:"-"` +} + +type metadataIamInstanceProfileSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s IamInstanceProfileSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfileSpecification) GoString() string { + return s.String() +} + +// Describes the ICMP type and code. +type IcmpTypeCode struct { + // The ICMP type. A value of -1 means all types. + Code *int64 `locationName:"code" type:"integer"` + + // The ICMP code. A value of -1 means all codes for the specified ICMP type. 
+ Type *int64 `locationName:"type" type:"integer"` + + metadataIcmpTypeCode `json:"-" xml:"-"` +} + +type metadataIcmpTypeCode struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s IcmpTypeCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IcmpTypeCode) GoString() string { + return s.String() +} + +// Describes an image. +type Image struct { + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The date and time the image was created. + CreationDate *string `locationName:"creationDate" type:"string"` + + // The description of the AMI that was provided during image creation. + Description *string `locationName:"description" type:"string"` + + // The hypervisor type of the image. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The location of the AMI. + ImageLocation *string `locationName:"imageLocation" type:"string"` + + // The AWS account alias (for example, amazon, self) or the AWS account ID of + // the AMI owner. + ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"` + + // The type of image. + ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"` + + // The kernel associated with the image, if any. Only applicable for machine + // images. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the AMI that was provided during image creation. + Name *string `locationName:"name" type:"string"` + + // The AWS account ID of the image owner. + OwnerId *string `locationName:"imageOwnerId" type:"string"` + + // The value is Windows for Windows AMIs; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // Any product codes associated with the AMI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the image has public launch permissions. The value is true + // if this image has public launch permissions or false if it has only implicit + // and explicit launch permissions. + Public *bool `locationName:"isPublic" type:"boolean"` + + // The RAM disk associated with the image, if any. Only applicable for machine + // images. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The type of root device used by the AMI. The AMI can use an EBS volume or + // an instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // Specifies whether enhanced networking is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the AMI. If the state is available, the image is successfully + // registered and can be used to launch an instance. + State *string `locationName:"imageState" type:"string" enum:"ImageState"` + + // The reason for the state change. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // Any tags assigned to the image. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of virtualization of the AMI. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` + + metadataImage `json:"-" xml:"-"` +} + +type metadataImage struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// Describes the disk container object for an import image task. +type ImageDiskContainer struct { + // The description of the disk image. + Description *string `type:"string"` + + // The block device mapping for the disk. + DeviceName *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The ID of the EBS snapshot to be used for importing the snapshot. + SnapshotId *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. The URL can either + // be a https URL (https://..) or an Amazon S3 URL (s3://..) + Url *string `type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` + + metadataImageDiskContainer `json:"-" xml:"-"` +} + +type metadataImageDiskContainer struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImageDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDiskContainer) GoString() string { + return s.String() +} + +type ImportImageInput struct { + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 + Architecture *string `type:"string"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // The token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // A description string for the import image task. + Description *string `type:"string"` + + // Information about the disk containers. + DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The target hypervisor platform. + // + // Valid values: xen + Hypervisor *string `type:"string"` + + // The license type to be used for the Amazon Machine Image (AMI) after importing. + // + // Note: You may only use BYOL if you have existing licenses with rights to + // use these licenses in a third party cloud like AWS. For more information, + // see VM Import/Export Prerequisites (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Valid values: AWS | BYOL + LicenseType *string `type:"string"` + + // The operating system of the virtual machine. + // + // Valid values: Windows | Linux + Platform *string `type:"string"` + + // The name of the role to use when not using the default role, 'vmimport'. 
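Editorial sketch, under the same assumptions as the earlier example: the Image struct above is what DescribeImages returns. The Owners field on DescribeImagesInput is assumed from the same SDK; "self" restricts the listing to the caller's own AMIs.

// Sketch only: list the account's own AMIs and print a couple of Image fields.
func listOwnImages(svc *ec2.EC2) error {
	resp, err := svc.DescribeImages(&ec2.DescribeImagesInput{
		Owners: []*string{aws.String("self")},
	})
	if err != nil {
		return err
	}
	for _, img := range resp.Images {
		if img.ImageId == nil || img.Name == nil {
			continue // pointer fields may be nil
		}
		fmt.Printf("%s  %s\n", *img.ImageId, *img.Name)
	}
	return nil
}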
+ RoleName *string `type:"string"` + + metadataImportImageInput `json:"-" xml:"-"` +} + +type metadataImportImageInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageInput) GoString() string { + return s.String() +} + +type ImportImageOutput struct { + // The architecture of the virtual machine. + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // The target hypervisor of the import task. + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) created by the import task. + ImageId *string `locationName:"imageId" type:"string"` + + // The task ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The license type of the virtual machine. + LicenseType *string `locationName:"licenseType" type:"string"` + + // The operating system of the virtual machine. + Platform *string `locationName:"platform" type:"string"` + + // The progress of the task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the snapshots. + SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"` + + // A brief status of the task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message of the import task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + metadataImportImageOutput `json:"-" xml:"-"` +} + +type metadataImportImageOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageOutput) GoString() string { + return s.String() +} + +// Describes an import image task. +type ImportImageTask struct { + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // The target hypervisor for the import task. + // + // Valid values: xen + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) of the imported virtual machine. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The license type of the virtual machine. + LicenseType *string `locationName:"licenseType" type:"string"` + + // The description string for the import image task. + Platform *string `locationName:"platform" type:"string"` + + // The percentage of progress of the import image task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the snapshots. + SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"` + + // A brief status for the import image task. + Status *string `locationName:"status" type:"string"` + + // A descriptive status message for the import image task. 
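Editorial sketch: starting a VM import with ImportImageInput and the ImageDiskContainer type above. The S3 URL, description, and disk format are placeholders; RoleName is left unset so the default 'vmimport' role applies, as the field comment describes.

// Sketch only: kick off an image import task from a disk image in S3.
func startImageImport(svc *ec2.EC2) (string, error) {
	task, err := svc.ImportImage(&ec2.ImportImageInput{
		Description: aws.String("imported web server"),
		DiskContainers: []*ec2.ImageDiskContainer{{
			Format: aws.String("VMDK"), // RAW | VHD | VMDK | OVA, per the comment above
			Url:    aws.String("s3://example-import-bucket/disks/webserver.vmdk"), // hypothetical object
		}},
	})
	if err != nil {
		return "", err
	}
	if task.ImportTaskId == nil {
		return "", nil
	}
	// The returned ImportTaskId can later be polled via the import image task APIs.
	return *task.ImportTaskId, nil
}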
+ StatusMessage *string `locationName:"statusMessage" type:"string"` + + metadataImportImageTask `json:"-" xml:"-"` +} + +type metadataImportImageTask struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageTask) GoString() string { + return s.String() +} + +type ImportInstanceInput struct { + // A description for the instance being imported. + Description *string `locationName:"description" type:"string"` + + // The disk image. + DiskImages []*DiskImage `locationName:"diskImage" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The launch specification. + LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"` + + metadataImportInstanceInput `json:"-" xml:"-"` +} + +type metadataImportInstanceInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceInput) GoString() string { + return s.String() +} + +// Describes the launch specification for VM import. +type ImportInstanceLaunchSpecification struct { + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The architecture of the instance. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more security group IDs. + GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"` + + // One or more security group names. + GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information about the instance types that you + // can import, see Before You Get Started (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether monitoring is enabled. + Monitoring *bool `locationName:"monitoring" type:"boolean"` + + // The placement information for the instance. + Placement *Placement `locationName:"placement" type:"structure"` + + // [EC2-VPC] An available IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // [EC2-VPC] The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to be made available to the instance. 
+ UserData *UserData `locationName:"userData" type:"structure"` + + metadataImportInstanceLaunchSpecification `json:"-" xml:"-"` +} + +type metadataImportInstanceLaunchSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceLaunchSpecification) GoString() string { + return s.String() +} + +type ImportInstanceOutput struct { + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` + + metadataImportInstanceOutput `json:"-" xml:"-"` +} + +type metadataImportInstanceOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceOutput) GoString() string { + return s.String() +} + +// Describes an import instance task. +type ImportInstanceTaskDetails struct { + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // One or more volumes. + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"` + + metadataImportInstanceTaskDetails `json:"-" xml:"-"` +} + +type metadataImportInstanceTaskDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceTaskDetails) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportInstanceVolumeDetailItem struct { + // The Availability Zone where the resulting instance will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The status of the import of this particular disk image. + Status *string `locationName:"status" type:"string" required:"true"` + + // The status information or errors related to the disk image. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The volume. 
+ Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` + + metadataImportInstanceVolumeDetailItem `json:"-" xml:"-"` +} + +type metadataImportInstanceVolumeDetailItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceVolumeDetailItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceVolumeDetailItem) GoString() string { + return s.String() +} + +type ImportKeyPairInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + KeyName *string `locationName:"keyName" type:"string" required:"true"` + + // The public key. You must base64 encode the public key material before sending + // it to AWS. + PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` + + metadataImportKeyPairInput `json:"-" xml:"-"` +} + +type metadataImportKeyPairInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairInput) GoString() string { + return s.String() +} + +type ImportKeyPairOutput struct { + // The MD5 public key fingerprint as specified in section 4 of RFC 4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The key pair name you provided. + KeyName *string `locationName:"keyName" type:"string"` + + metadataImportKeyPairOutput `json:"-" xml:"-"` +} + +type metadataImportKeyPairOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairOutput) GoString() string { + return s.String() +} + +type ImportSnapshotInput struct { + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // Token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // The description string for the import snapshot task. + Description *string `type:"string"` + + // Information about the disk container. + DiskContainer *SnapshotDiskContainer `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name of the role to use when not using the default role, 'vmimport'. 
+ RoleName *string `type:"string"` + + metadataImportSnapshotInput `json:"-" xml:"-"` +} + +type metadataImportSnapshotInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotInput) GoString() string { + return s.String() +} + +type ImportSnapshotOutput struct { + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Information about the import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + metadataImportSnapshotOutput `json:"-" xml:"-"` +} + +type metadataImportSnapshotOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotOutput) GoString() string { + return s.String() +} + +// Describes an import snapshot task. +type ImportSnapshotTask struct { + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Describes an import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + metadataImportSnapshotTask `json:"-" xml:"-"` +} + +type metadataImportSnapshotTask struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotTask) GoString() string { + return s.String() +} + +type ImportVolumeInput struct { + // The Availability Zone for the resulting EBS volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the volume. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The disk image. + Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"` + + // The volume size. + Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"` + + metadataImportVolumeInput `json:"-" xml:"-"` +} + +type metadataImportVolumeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeInput) GoString() string { + return s.String() +} + +type ImportVolumeOutput struct { + // Information about the conversion task. 
+ ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` + + metadataImportVolumeOutput `json:"-" xml:"-"` +} + +type metadataImportVolumeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeOutput) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportVolumeTaskDetails struct { + // The Availability Zone where the resulting volume will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // The description you provided when starting the import volume task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` + + metadataImportVolumeTaskDetails `json:"-" xml:"-"` +} + +type metadataImportVolumeTaskDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeTaskDetails) GoString() string { + return s.String() +} + +// Describes an instance. +type Instance struct { + // The AMI launch index, which can be used to find this instance in the launch + // group. + AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries for the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The idempotency token you provided when you launched the instance, if applicable. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The hypervisor type of the instance. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The IAM instance profile associated with the instance, if applicable. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI used to launch the instance. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether this is a Spot Instance. + InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The kernel associated with this instance, if applicable. 
+ KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair, if this instance was launched with an associated + // key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The time the instance was launched. + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"` + + // The monitoring information for the instance. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + // [EC2-VPC] One or more network interfaces for the instance. + NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The location where the instance launched, if applicable. + Placement *Placement `locationName:"placement" type:"structure"` + + // The value is Windows for Windows instances; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The private DNS name assigned to the instance. This DNS name can only be + // used inside the Amazon EC2 network. This name is not available until the + // instance enters the running state. For EC2-VPC, this name is only available + // if you've enabled DNS hostnames for your VPC. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address assigned to the instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The product codes attached to this instance, if applicable. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The public DNS name assigned to the instance. This name is not available + // until the instance enters the running state. For EC2-VPC, this name is only + // available if you've enabled DNS hostnames for your VPC. + PublicDnsName *string `locationName:"dnsName" type:"string"` + + // The public IP address assigned to the instance, if applicable. + PublicIpAddress *string `locationName:"ipAddress" type:"string"` + + // The RAM disk associated with this instance, if applicable. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The root device name (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The root device type used by the AMI. The AMI can use an EBS volume or an + // instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // One or more security groups for the instance. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // Specifies whether to enable an instance launched in a VPC to perform NAT. + // This controls whether source/destination checking is enabled on the instance. + // A value of true means checking is enabled, and false means checking is disabled. + // The value must be false for the instance to perform NAT. For more information, + // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // If the request is a Spot instance request, the ID of the request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // Specifies whether enhanced networking is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the instance. 
+ State *InstanceState `locationName:"instanceState" type:"structure"` + + // The reason for the most recent state transition. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // The reason for the most recent state transition. This might be an empty string. + StateTransitionReason *string `locationName:"reason" type:"string"` + + // [EC2-VPC] The ID of the subnet in which the instance is running. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The virtualization type of the instance. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` + + // [EC2-VPC] The ID of the VPC in which the instance is running. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataInstance `json:"-" xml:"-"` +} + +type metadataInstance struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type InstanceBlockDeviceMapping struct { + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"` + + metadataInstanceBlockDeviceMapping `json:"-" xml:"-"` +} + +type metadataInstanceBlockDeviceMapping struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceBlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes a block device mapping entry. +type InstanceBlockDeviceMappingSpecification struct { + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"` + + // suppress the specified device included in the block device mapping. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name. + VirtualName *string `locationName:"virtualName" type:"string"` + + metadataInstanceBlockDeviceMappingSpecification `json:"-" xml:"-"` +} + +type metadataInstanceBlockDeviceMappingSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceBlockDeviceMappingSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceBlockDeviceMappingSpecification) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing state. +type InstanceCount struct { + // The number of listed Reserved Instances in the state specified by the state. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The states of the listed Reserved Instances. 
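Editorial sketch: the Filter and Instance types above usually meet in DescribeInstances. The filter name below comes from the standard EC2 filter vocabulary, and the Reservations/Instances field names on the response are assumed from the same SDK rather than shown in this diff.

// Sketch only: print the ID and private IP of every running instance.
func listRunningInstances(svc *ec2.EC2) error {
	resp, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("instance-state-name"),
			Values: []*string{aws.String("running")},
		}},
	})
	if err != nil {
		return err
	}
	for _, res := range resp.Reservations {
		for _, inst := range res.Instances {
			if inst.InstanceId == nil {
				continue
			}
			ip := "n/a"
			if inst.PrivateIpAddress != nil {
				ip = *inst.PrivateIpAddress
			}
			fmt.Printf("%s  %s\n", *inst.InstanceId, ip)
		}
	}
	return nil
}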
+ State *string `locationName:"state" type:"string" enum:"ListingState"` + + metadataInstanceCount `json:"-" xml:"-"` +} + +type metadataInstanceCount struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceCount) GoString() string { + return s.String() +} + +// Describes an instance to export. +type InstanceExportDetails struct { + // The ID of the resource being exported. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` + + metadataInstanceExportDetails `json:"-" xml:"-"` +} + +type metadataInstanceExportDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceExportDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceExportDetails) GoString() string { + return s.String() +} + +// Describes the monitoring information of the instance. +type InstanceMonitoring struct { + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The monitoring information. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + metadataInstanceMonitoring `json:"-" xml:"-"` +} + +type metadataInstanceMonitoring struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterface struct { + // The association information for an Elastic IP associated with the network + // interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description. + Description *string `locationName:"description" type:"string"` + + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that created the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // Indicates whether to validate network traffic to or from this network interface. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. 
+ Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataInstanceNetworkInterface `json:"-" xml:"-"` +} + +type metadataInstanceNetworkInterface struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type InstanceNetworkInterfaceAssociation struct { + // The ID of the owner of the Elastic IP address. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The public IP address or Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` + + metadataInstanceNetworkInterfaceAssociation `json:"-" xml:"-"` +} + +type metadataInstanceNetworkInterfaceAssociation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type InstanceNetworkInterfaceAttachment struct { + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The index of the device on the instance for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + metadataInstanceNetworkInterfaceAttachment `json:"-" xml:"-"` +} + +type metadataInstanceNetworkInterfaceAttachment struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterfaceSpecification struct { + // Indicates whether to assign a public IP address to an instance you launch + // in a VPC. The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` + + // If set to true, the interface is deleted when the instance is terminated. 
+ // You can specify true only if creating a new network interface when launching + // an instance. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The description of the network interface. Applies only if creating a network + // interface when launching an instance. + Description *string `locationName:"description" type:"string"` + + // The index of the device on the instance for the network interface attachment. + // If you are specifying a network interface in a RunInstances request, you + // must provide the device index. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The IDs of the security groups for the network interface. Applies only if + // creating a network interface when launching an instance. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address of the network interface. Applies only if creating + // a network interface when launching an instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses to assign to the network interface. Only + // one private IP address can be designated as primary. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses. You can't specify this option + // and specify more than one private IP address using the private IP addresses + // option. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet associated with the network string. Applies only if + // creating a network interface when launching an instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + metadataInstanceNetworkInterfaceSpecification `json:"-" xml:"-"` +} + +type metadataInstanceNetworkInterfaceSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceSpecification) GoString() string { + return s.String() +} + +// Describes a private IP address. +type InstancePrivateIpAddress struct { + // The association information for an Elastic IP address for the network interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IP address is the primary private IP address of the + // network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address of the network interface. 
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + metadataInstancePrivateIpAddress `json:"-" xml:"-"` +} + +type metadataInstancePrivateIpAddress struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstancePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstancePrivateIpAddress) GoString() string { + return s.String() +} + +// Describes the current state of the instance. +type InstanceState struct { + // The low byte represents the state. The high byte is an opaque internal value + // and should be ignored. + // + // 0 : pending + // + // 16 : running + // + // 32 : shutting-down + // + // 48 : terminated + // + // 64 : stopping + // + // 80 : stopped + Code *int64 `locationName:"code" type:"integer"` + + // The current state of the instance. + Name *string `locationName:"name" type:"string" enum:"InstanceStateName"` + + metadataInstanceState `json:"-" xml:"-"` +} + +type metadataInstanceState struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceState) GoString() string { + return s.String() +} + +// Describes an instance state change. +type InstanceStateChange struct { + // The current state of the instance. + CurrentState *InstanceState `locationName:"currentState" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The previous state of the instance. + PreviousState *InstanceState `locationName:"previousState" type:"structure"` + + metadataInstanceStateChange `json:"-" xml:"-"` +} + +type metadataInstanceStateChange struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChange) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatus struct { + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Any scheduled events associated with the instance. + Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The intended state of the instance. DescribeInstanceStatus requires that + // an instance be in the running state. + InstanceState *InstanceState `locationName:"instanceState" type:"structure"` + + // Reports impaired functionality that stems from issues internal to the instance, + // such as impaired reachability. + InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"` + + // Reports impaired functionality that stems from issues related to the systems + // that support an instance, such as hardware failures and network connectivity + // problems. 
+ SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"` + + metadataInstanceStatus `json:"-" xml:"-"` +} + +type metadataInstanceStatus struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// Describes the instance status. +type InstanceStatusDetails struct { + // The time when a status check failed. For an instance that was launched and + // impaired, this is the time when the instance was launched. + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"` + + // The type of instance status. + Name *string `locationName:"name" type:"string" enum:"StatusName"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"StatusType"` + + metadataInstanceStatusDetails `json:"-" xml:"-"` +} + +type metadataInstanceStatusDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusDetails) GoString() string { + return s.String() +} + +// Describes a scheduled event for an instance. +type InstanceStatusEvent struct { + // The event code. + Code *string `locationName:"code" type:"string" enum:"EventCode"` + + // A description of the event. + // + // After a scheduled event is completed, it can still be described for up to + // a week. If the event has been completed, this description starts with the + // following text: [Completed]. + Description *string `locationName:"description" type:"string"` + + // The latest scheduled end time for the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest scheduled start time for the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` + + metadataInstanceStatusEvent `json:"-" xml:"-"` +} + +type metadataInstanceStatusEvent struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatusSummary struct { + // The system instance health or application instance health. + Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"SummaryStatus"` + + metadataInstanceStatusSummary `json:"-" xml:"-"` +} + +type metadataInstanceStatusSummary struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatusSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusSummary) GoString() string { + return s.String() +} + +// Describes an Internet gateway. +type InternetGateway struct { + // Any VPCs attached to the Internet gateway. + Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The ID of the Internet gateway. 
+ InternetGatewayId *string `locationName:"internetGatewayId" type:"string"` + + // Any tags assigned to the Internet gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + metadataInternetGateway `json:"-" xml:"-"` +} + +type metadataInternetGateway struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InternetGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGateway) GoString() string { + return s.String() +} + +// Describes the attachment of a VPC to an Internet gateway. +type InternetGatewayAttachment struct { + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataInternetGatewayAttachment `json:"-" xml:"-"` +} + +type metadataInternetGatewayAttachment struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s InternetGatewayAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGatewayAttachment) GoString() string { + return s.String() +} + +// Describes a security group rule. +type IpPermission struct { + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The protocol. + // + // When you call DescribeSecurityGroups, the protocol value returned is the + // number. Exception: For TCP, UDP, and ICMP, the value returned is the name + // (for example, tcp, udp, or icmp). For a list of protocol numbers, see Protocol + // Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). + // (VPC only) When you call AuthorizeSecurityGroupIngress, you can use -1 to + // specify all. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // One or more IP ranges. + IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups + // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress + // request, this is the AWS service that you want to access through a VPC endpoint + // from instances associated with the security group. + PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code. A value + // of -1 indicates all ICMP codes for the specified ICMP type. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // One or more security group and AWS account ID pairs. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` + + metadataIpPermission `json:"-" xml:"-"` +} + +type metadataIpPermission struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// Describes an IP range. +type IpRange struct { + // The CIDR range. You can either specify a CIDR range or a source security + // group, not both. 
+ CidrIp *string `locationName:"cidrIp" type:"string"` + + metadataIpRange `json:"-" xml:"-"` +} + +type metadataIpRange struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s IpRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRange) GoString() string { + return s.String() +} + +// Describes a key pair. +type KeyPairInfo struct { + // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest + // of the DER encoded private key. If you used ImportKeyPair to provide AWS + // the public key, this is the MD5 public key fingerprint as specified in section + // 4 of RFC4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + metadataKeyPairInfo `json:"-" xml:"-"` +} + +type metadataKeyPairInfo struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s KeyPairInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairInfo) GoString() string { + return s.String() +} + +// Describes a launch permission. +type LaunchPermission struct { + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` + + metadataLaunchPermission `json:"-" xml:"-"` +} + +type metadataLaunchPermission struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s LaunchPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermission) GoString() string { + return s.String() +} + +// Describes a launch permission modification. +type LaunchPermissionModifications struct { + // The AWS account ID to add to the list of launch permissions for the AMI. + Add []*LaunchPermission `locationNameList:"item" type:"list"` + + // The AWS account ID to remove from the list of launch permissions for the + // AMI. + Remove []*LaunchPermission `locationNameList:"item" type:"list"` + + metadataLaunchPermissionModifications `json:"-" xml:"-"` +} + +type metadataLaunchPermissionModifications struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s LaunchPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermissionModifications) GoString() string { + return s.String() +} + +// Describes the launch specification for an instance. +type LaunchSpecification struct { + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. 
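// Illustrative sketch, not part of the vendored SDK: building an IpPermission
// that allows inbound TCP 443 from anywhere, using only the IpPermission and
// IpRange fields defined above. The import path is assumed to match the vendored
// copy of the SDK; String() pretty-prints via awsutil.Prettify as shown above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	proto, cidr := "tcp", "0.0.0.0/0"
	port := int64(443)
	perm := &ec2.IpPermission{
		IpProtocol: &proto,
		FromPort:   &port,
		ToPort:     &port,
		IpRanges:   []*ec2.IpRange{{CidrIp: &cidr}},
	}
	fmt.Println(perm.String())
}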
+ IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` + + metadataLaunchSpecification `json:"-" xml:"-"` +} + +type metadataLaunchSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s LaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchSpecification) GoString() string { + return s.String() +} + +type ModifyImageAttributeInput struct { + // The name of the attribute to modify. + Attribute *string `type:"string"` + + // A description for the AMI. + Description *AttributeValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + // A launch permission modification. + LaunchPermission *LaunchPermissionModifications `type:"structure"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AMI, it can't + // be removed. This is only valid when modifying the productCodes attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This is only valid when modifying the launchPermission + // attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This is only valid when modifying the launchPermission + // attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + // The value of the attribute being modified. This is only valid when modifying + // the description attribute. 
+ Value *string `type:"string"` + + metadataModifyImageAttributeInput `json:"-" xml:"-"` +} + +type metadataModifyImageAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeInput) GoString() string { + return s.String() +} + +type ModifyImageAttributeOutput struct { + metadataModifyImageAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifyImageAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeInput struct { + // The name of the attribute. + Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"` + + // Modifies the DeleteOnTermination attribute for volumes that are currently + // attached. The volume must be owned by the caller. If no value is specified + // for DeleteOnTermination, the default is true and the volume is deleted when + // the instance is terminated. + // + // To add instance store volumes to an Amazon EBS-backed instance, you must + // add them when you launch the instance. For more information, see Updating + // the Block Device Mapping when Launching an Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // in the Amazon Elastic Compute Cloud User Guide. + BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance using the Amazon EC2 + // console, CLI, or API; otherwise, you can. You cannot use this paramater for + // Spot Instances. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // [EC2-VPC] Changes the security groups of the instance. You must specify at + // least one security group, even if it's just the default security group for + // the VPC. You must specify the security group ID, not the security group name. + Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // Specifies whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). 
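// Illustrative sketch, not part of the vendored SDK: sharing an AMI with another
// account by adding a launch permission via ModifyImageAttributeInput and the
// LaunchPermissionModifications type defined above. The AMI and account IDs are
// placeholders; in real use the input would be passed to the EC2 client's
// ModifyImageAttribute call.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	imageID, attr, account := "ami-12345678", "launchPermission", "111122223333"
	input := &ec2.ModifyImageAttributeInput{
		ImageId:   &imageID,
		Attribute: &attr,
		LaunchPermission: &ec2.LaunchPermissionModifications{
			Add: []*ec2.LaunchPermission{{UserId: &account}},
		},
	}
	fmt.Println(input.String())
}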
+ InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // Changes the instance type to the specified value. For more information, see + // Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // Changes the instance's kernel to the specified value. We recommend that you + // use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Kernel *AttributeValue `locationName:"kernel" type:"structure"` + + // Changes the instance's RAM disk to the specified value. We recommend that + // you use PV-GRUB instead of kernels and RAM disks. For more information, see + // PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Specifies whether source/destination checking is enabled. A value of true + // means that checking is enabled, and false means checking is disabled. This + // value must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `type:"structure"` + + // Set to simple to enable enhanced networking for the instance. + // + // There is no way to disable enhanced networking at this time. + // + // This option is supported only for HVM instances. Specifying this option + // with a PV instance can make it unreachable. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // Changes the instance's user data to the specified value. + UserData *BlobAttributeValue `locationName:"userData" type:"structure"` + + // A new value for the attribute. Use only with the kernel, ramdisk, userData, + // disableApiTermination, or instanceInitiatedShutdownBehavior attribute. + Value *string `locationName:"value" type:"string"` + + metadataModifyInstanceAttributeInput `json:"-" xml:"-"` +} + +type metadataModifyInstanceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeInput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeOutput struct { + metadataModifyInstanceAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifyInstanceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyNetworkInterfaceAttributeInput struct { + // Information about the interface attachment. If modifying the 'delete on termination' + // attribute, you must specify the ID of the interface attachment. + Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"` + + // A description for the network interface. 
+ Description *AttributeValue `locationName:"description" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Changes the security groups for the network interface. The new set of groups + // you specify replaces the current set. You must specify at least one group, + // even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. For more information, see + // NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + metadataModifyNetworkInterfaceAttributeInput `json:"-" xml:"-"` +} + +type metadataModifyNetworkInterfaceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ModifyNetworkInterfaceAttributeOutput struct { + metadataModifyNetworkInterfaceAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifyNetworkInterfaceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesInput struct { + // A unique, case-sensitive token you provide to ensure idempotency of your + // modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the Reserved Instances to modify. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` + + // The configuration settings for the Reserved Instances to modify. 
+ TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` + + metadataModifyReservedInstancesInput `json:"-" xml:"-"` +} + +type metadataModifyReservedInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesInput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesOutput struct { + // The ID for the modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` + + metadataModifyReservedInstancesOutput `json:"-" xml:"-"` +} + +type metadataModifyReservedInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesOutput) GoString() string { + return s.String() +} + +type ModifySnapshotAttributeInput struct { + // The snapshot attribute to modify. + // + // Only volume creation permissions may be modified at the customer level. + Attribute *string `type:"string" enum:"SnapshotAttributeName"` + + // A JSON representation of the snapshot attribute modification. + CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The group to modify for the snapshot. + GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"` + + // The type of operation to perform to the attribute. + OperationType *string `type:"string" enum:"OperationType"` + + // The ID of the snapshot. + SnapshotId *string `type:"string" required:"true"` + + // The account ID to modify for the snapshot. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + metadataModifySnapshotAttributeInput `json:"-" xml:"-"` +} + +type metadataModifySnapshotAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeInput) GoString() string { + return s.String() +} + +type ModifySnapshotAttributeOutput struct { + metadataModifySnapshotAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifySnapshotAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifySpotFleetRequest. 
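// Illustrative sketch, not part of the vendored SDK: granting another account
// permission to create volumes from a snapshot using the flattened form of
// ModifySnapshotAttributeInput (attribute + operation type + user IDs) rather
// than the CreateVolumePermission structure. The snapshot and account IDs are
// placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	snapshotID := "snap-1234abcd"
	attr, op, account := "createVolumePermission", "add", "111122223333"
	input := &ec2.ModifySnapshotAttributeInput{
		SnapshotId:    &snapshotID,
		Attribute:     &attr,
		OperationType: &op,
		UserIds:       []*string{&account},
	}
	fmt.Println(input.String())
}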
+type ModifySpotFleetRequestInput struct { + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The size of the fleet. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"` + + metadataModifySpotFleetRequestInput `json:"-" xml:"-"` +} + +type metadataModifySpotFleetRequestInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestInput) GoString() string { + return s.String() +} + +// Contains the output of ModifySpotFleetRequest. +type ModifySpotFleetRequestOutput struct { + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` + + metadataModifySpotFleetRequestOutput `json:"-" xml:"-"` +} + +type metadataModifySpotFleetRequestOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestOutput) GoString() string { + return s.String() +} + +type ModifySubnetAttributeInput struct { + // Specify true to indicate that instances launched into the specified subnet + // should be assigned public IP address. + MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + metadataModifySubnetAttributeInput `json:"-" xml:"-"` +} + +type metadataModifySubnetAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySubnetAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeInput) GoString() string { + return s.String() +} + +type ModifySubnetAttributeOutput struct { + metadataModifySubnetAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifySubnetAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifySubnetAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVolumeAttributeInput struct { + // Indicates whether the volume should be auto-enabled for I/O operations. + AutoEnableIO *AttributeBooleanValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. 
+ VolumeId *string `type:"string" required:"true"` + + metadataModifyVolumeAttributeInput `json:"-" xml:"-"` +} + +type metadataModifyVolumeAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeInput) GoString() string { + return s.String() +} + +type ModifyVolumeAttributeOutput struct { + metadataModifyVolumeAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifyVolumeAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcAttributeInput struct { + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // enabled, instances in the VPC get DNS hostnames; otherwise, they do not. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. You can only enable + // DNS hostnames if you've enabled DNS support. + EnableDnsHostnames *AttributeBooleanValue `type:"structure"` + + // Indicates whether the DNS resolution is supported for the VPC. If enabled, + // queries to the Amazon provided DNS server at the 169.254.169.253 IP address, + // or the reserved IP address at the base of the VPC network range "plus two" + // will succeed. If disabled, the Amazon provided DNS service in the VPC that + // resolves public DNS hostnames to IP addresses is not enabled. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. + EnableDnsSupport *AttributeBooleanValue `type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` + + metadataModifyVpcAttributeInput `json:"-" xml:"-"` +} + +type metadataModifyVpcAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeInput) GoString() string { + return s.String() +} + +type ModifyVpcAttributeOutput struct { + metadataModifyVpcAttributeOutput `json:"-" xml:"-"` +} + +type metadataModifyVpcAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointInput struct { + // One or more route tables IDs to associate with the endpoint. + AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy document to attach to the endpoint. 
The policy must be in valid + // JSON format. + PolicyDocument *string `type:"string"` + + // One or more route table IDs to disassociate from the endpoint. + RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"` + + // Specify true to reset the policy document to the default policy. The default + // policy allows access to the service. + ResetPolicy *bool `type:"boolean"` + + // The ID of the endpoint. + VpcEndpointId *string `type:"string" required:"true"` + + metadataModifyVpcEndpointInput `json:"-" xml:"-"` +} + +type metadataModifyVpcEndpointInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointInput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataModifyVpcEndpointOutput `json:"-" xml:"-"` +} + +type metadataModifyVpcEndpointOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointOutput) GoString() string { + return s.String() +} + +type MonitorInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataMonitorInstancesInput `json:"-" xml:"-"` +} + +type metadataMonitorInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesInput) GoString() string { + return s.String() +} + +type MonitorInstancesOutput struct { + // Monitoring information for one or more instances. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` + + metadataMonitorInstancesOutput `json:"-" xml:"-"` +} + +type metadataMonitorInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesOutput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type Monitoring struct { + // Indicates whether monitoring is enabled for the instance. 
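// Illustrative sketch, not part of the vendored SDK: requesting detailed
// monitoring for two instances with MonitorInstancesInput as defined above.
// The instance IDs are placeholders; the input would normally be passed to the
// EC2 client's MonitorInstances call, whose output carries the per-instance
// monitoring state.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	a, b := "i-1234567890abcdef0", "i-0fedcba0987654321"
	input := &ec2.MonitorInstancesInput{
		InstanceIds: []*string{&a, &b},
	}
	fmt.Println(input.String())
}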
+ State *string `locationName:"state" type:"string" enum:"MonitoringState"` + + metadataMonitoring `json:"-" xml:"-"` +} + +type metadataMonitoring struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Monitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Monitoring) GoString() string { + return s.String() +} + +type MoveAddressToVpcInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` + + metadataMoveAddressToVpcInput `json:"-" xml:"-"` +} + +type metadataMoveAddressToVpcInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MoveAddressToVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcInput) GoString() string { + return s.String() +} + +type MoveAddressToVpcOutput struct { + // The allocation ID for the Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The status of the move of the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` + + metadataMoveAddressToVpcOutput `json:"-" xml:"-"` +} + +type metadataMoveAddressToVpcOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MoveAddressToVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcOutput) GoString() string { + return s.String() +} + +// Describes the status of a moving Elastic IP address. +type MovingAddressStatus struct { + // The status of the Elastic IP address that's being moved to the EC2-VPC platform, + // or restored to the EC2-Classic platform. + MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + metadataMovingAddressStatus `json:"-" xml:"-"` +} + +type metadataMovingAddressStatus struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s MovingAddressStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MovingAddressStatus) GoString() string { + return s.String() +} + +// Describes a network ACL. +type NetworkAcl struct { + // Any associations between the network ACL and one or more subnets + Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // One or more entries (rules) in the network ACL. + Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // Indicates whether this is the default network ACL for the VPC. + IsDefault *bool `locationName:"default" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // Any tags assigned to the network ACL. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC for the network ACL. 
+ VpcId *string `locationName:"vpcId" type:"string"` + + metadataNetworkAcl `json:"-" xml:"-"` +} + +type metadataNetworkAcl struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkAcl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAcl) GoString() string { + return s.String() +} + +// Describes an association between a network ACL and a subnet. +type NetworkAclAssociation struct { + // The ID of the association between a network ACL and a subnet. + NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + metadataNetworkAclAssociation `json:"-" xml:"-"` +} + +type metadataNetworkAclAssociation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkAclAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclAssociation) GoString() string { + return s.String() +} + +// Describes an entry in a network ACL. +type NetworkAclEntry struct { + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether the rule is an egress rule (applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean"` + + // ICMP protocol: The ICMP type and code. + IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"` + + // The rule number for the entry. ACL entries are processed in ascending order + // by rule number. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` + + metadataNetworkAclEntry `json:"-" xml:"-"` +} + +type metadataNetworkAclEntry struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkAclEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclEntry) GoString() string { + return s.String() +} + +// Describes a network interface. +type NetworkInterface struct { + // The association information for an Elastic IP associated with the network + // interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A description. + Description *string `locationName:"description" type:"string"` + + // Any security groups for the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The AWS account ID of the owner of the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the entity that launched the instance on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // Indicates whether the network interface is being managed by AWS. + RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // Indicates whether traffic to or from the instance is validated. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the network interface. + TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataNetworkInterface `json:"-" xml:"-"` +} + +type metadataNetworkInterface struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type NetworkInterfaceAssociation struct { + // The allocation ID. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string"` + + // The ID of the Elastic IP address owner. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The address of the Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` + + metadataNetworkInterfaceAssociation `json:"-" xml:"-"` +} + +type metadataNetworkInterfaceAssociation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type NetworkInterfaceAttachment struct { + // The timestamp indicating when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. 
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + metadataNetworkInterfaceAttachment `json:"-" xml:"-"` +} + +type metadataNetworkInterfaceAttachment struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes an attachment change. +type NetworkInterfaceAttachmentChanges struct { + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + metadataNetworkInterfaceAttachmentChanges `json:"-" xml:"-"` +} + +type metadataNetworkInterfaceAttachmentChanges struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachmentChanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachmentChanges) GoString() string { + return s.String() +} + +// Describes the private IP address of a network interface. +type NetworkInterfacePrivateIpAddress struct { + // The association information for an Elastic IP address associated with the + // network interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IP address is the primary private IP address of the + // network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + metadataNetworkInterfacePrivateIpAddress `json:"-" xml:"-"` +} + +type metadataNetworkInterfacePrivateIpAddress struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfacePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePrivateIpAddress) GoString() string { + return s.String() +} + +type NewDhcpConfiguration struct { + Key *string `locationName:"key" type:"string"` + + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` + + metadataNewDhcpConfiguration `json:"-" xml:"-"` +} + +type metadataNewDhcpConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s NewDhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewDhcpConfiguration) GoString() string { + return s.String() +} + +// Describes the placement for the instance. 
+type Placement struct { + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group the instance is in (for cluster compute instances). + GroupName *string `locationName:"groupName" type:"string"` + + // The tenancy of the instance (if the instance is running in a VPC). An instance + // with a tenancy of dedicated runs on single-tenant hardware. + Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` + + metadataPlacement `json:"-" xml:"-"` +} + +type metadataPlacement struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Placement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Placement) GoString() string { + return s.String() +} + +// Describes a placement group. +type PlacementGroup struct { + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string"` + + // The state of the placement group. + State *string `locationName:"state" type:"string" enum:"PlacementGroupState"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` + + metadataPlacementGroup `json:"-" xml:"-"` +} + +type metadataPlacementGroup struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PlacementGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementGroup) GoString() string { + return s.String() +} + +// Describes a range of ports. +type PortRange struct { + // The first port in the range. + From *int64 `locationName:"from" type:"integer"` + + // The last port in the range. + To *int64 `locationName:"to" type:"integer"` + + metadataPortRange `json:"-" xml:"-"` +} + +type metadataPortRange struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// Describes prefixes for AWS services. +type PrefixList struct { + // The IP address range of the AWS service. + Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix. + PrefixListName *string `locationName:"prefixListName" type:"string"` + + metadataPrefixList `json:"-" xml:"-"` +} + +type metadataPrefixList struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixList) GoString() string { + return s.String() +} + +// The ID of the prefix. +type PrefixListId struct { + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + metadataPrefixListId `json:"-" xml:"-"` +} + +type metadataPrefixListId struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListId) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. 
+type PriceSchedule struct { + // The current price schedule, as determined by the term remaining for the Reserved + // Instance in the listing. + // + // A specific price schedule is always in effect, but only one price schedule + // can be active at any time. Take, for example, a Reserved Instance listing + // that has five months remaining in its term. When you specify price schedules + // for five months and two months, this means that schedule 1, covering the + // first three months of the remaining term, will be active during months 5, + // 4, and 3. Then schedule 2, covering the last two months of the term, will + // be active for months 2 and 1. + Active *bool `locationName:"active" type:"boolean"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` + + metadataPriceSchedule `json:"-" xml:"-"` +} + +type metadataPriceSchedule struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PriceSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceSchedule) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceScheduleSpecification struct { + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` + + metadataPriceScheduleSpecification `json:"-" xml:"-"` +} + +type metadataPriceScheduleSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PriceScheduleSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceScheduleSpecification) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type PricingDetail struct { + // The number of instances available for the price. + Count *int64 `locationName:"count" type:"integer"` + + // The price per instance. + Price *float64 `locationName:"price" type:"double"` + + metadataPricingDetail `json:"-" xml:"-"` +} + +type metadataPricingDetail struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PricingDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PricingDetail) GoString() string { + return s.String() +} + +// Describes a secondary private IP address for a network interface. +type PrivateIpAddressSpecification struct { + // Indicates whether the private IP address is the primary private IP address. + // Only one IP address can be designated as primary. 
+ Primary *bool `locationName:"primary" type:"boolean"` + + // The private IP addresses. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"` + + metadataPrivateIpAddressSpecification `json:"-" xml:"-"` +} + +type metadataPrivateIpAddressSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PrivateIpAddressSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrivateIpAddressSpecification) GoString() string { + return s.String() +} + +// Describes a product code. +type ProductCode struct { + // The product code. + ProductCodeId *string `locationName:"productCode" type:"string"` + + // The type of product code. + ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"` + + metadataProductCode `json:"-" xml:"-"` +} + +type metadataProductCode struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ProductCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductCode) GoString() string { + return s.String() +} + +// Describes a virtual private gateway propagating route. +type PropagatingVgw struct { + // The ID of the virtual private gateway (VGW). + GatewayId *string `locationName:"gatewayId" type:"string"` + + metadataPropagatingVgw `json:"-" xml:"-"` +} + +type metadataPropagatingVgw struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PropagatingVgw) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PropagatingVgw) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The number of Reserved Instances to purchase. + InstanceCount *int64 `type:"integer" required:"true"` + + // Specified for Reserved Instance Marketplace offerings to limit the total + // order and ensure that the Reserved Instances are not purchased at unexpected + // prices. + LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + + // The ID of the Reserved Instance offering to purchase. + ReservedInstancesOfferingId *string `type:"string" required:"true"` + + metadataPurchaseReservedInstancesOfferingInput `json:"-" xml:"-"` +} + +type metadataPurchaseReservedInstancesOfferingInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingOutput struct { + // The IDs of the purchased Reserved Instances. 
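// Illustrative sketch, not part of the vendored SDK: purchasing a single
// Reserved Instance offering. Both required fields of
// PurchaseReservedInstancesOfferingInput are set; the offering ID is a
// placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	count := int64(1)
	offeringID := "ri-offering-id-placeholder"
	input := &ec2.PurchaseReservedInstancesOfferingInput{
		InstanceCount:               &count,
		ReservedInstancesOfferingId: &offeringID,
	}
	fmt.Println(input.String())
}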
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + metadataPurchaseReservedInstancesOfferingOutput `json:"-" xml:"-"` +} + +type metadataPurchaseReservedInstancesOfferingOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) GoString() string { + return s.String() +} + +type RebootInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataRebootInstancesInput `json:"-" xml:"-"` +} + +type metadataRebootInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesInput) GoString() string { + return s.String() +} + +type RebootInstancesOutput struct { + metadataRebootInstancesOutput `json:"-" xml:"-"` +} + +type metadataRebootInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + // The amount of the recurring charge. + Amount *float64 `locationName:"amount" type:"double"` + + // The frequency of the recurring charge. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` + + metadataRecurringCharge `json:"-" xml:"-"` +} + +type metadataRecurringCharge struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a region. +type Region struct { + // The region service endpoint. + Endpoint *string `locationName:"regionEndpoint" type:"string"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` + + metadataRegion `json:"-" xml:"-"` +} + +type metadataRegion struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Region) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Region) GoString() string { + return s.String() +} + +type RegisterImageInput struct { + // The architecture of the AMI. + // + // Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, + // the architecture specified in the manifest file. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more block device mapping entries. 
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for your AMI. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The full path to your AMI manifest in Amazon S3 storage. + ImageLocation *string `type:"string"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // A name for your AMI. + // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + Name *string `locationName:"name" type:"string" required:"true"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The name of the root device (for example, /dev/sda1, or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // Set to simple to enable enhanced networking for the AMI and any instances + // that you launch from the AMI. + // + // There is no way to disable enhanced networking at this time. + // + // This option is supported only for HVM AMIs. Specifying this option with + // a PV AMI can make instances launched from the AMI unreachable. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The type of virtualization. + // + // Default: paravirtual + VirtualizationType *string `locationName:"virtualizationType" type:"string"` + + metadataRegisterImageInput `json:"-" xml:"-"` +} + +type metadataRegisterImageInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RegisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageInput) GoString() string { + return s.String() +} + +type RegisterImageOutput struct { + // The ID of the newly registered AMI. + ImageId *string `locationName:"imageId" type:"string"` + + metadataRegisterImageOutput `json:"-" xml:"-"` +} + +type metadataRegisterImageOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RegisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageOutput) GoString() string { + return s.String() +} + +type RejectVpcPeeringConnectionInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. 
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` + + metadataRejectVpcPeeringConnectionInput `json:"-" xml:"-"` +} + +type metadataRejectVpcPeeringConnectionInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type RejectVpcPeeringConnectionOutput struct { + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` + + metadataRejectVpcPeeringConnectionOutput `json:"-" xml:"-"` +} + +type metadataRejectVpcPeeringConnectionOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type ReleaseAddressInput struct { + // [EC2-VPC] The allocation ID. Required for EC2-VPC. + AllocationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` + + metadataReleaseAddressInput `json:"-" xml:"-"` +} + +type metadataReleaseAddressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReleaseAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressInput) GoString() string { + return s.String() +} + +type ReleaseAddressOutput struct { + metadataReleaseAddressOutput `json:"-" xml:"-"` +} + +type metadataReleaseAddressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReleaseAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationInput struct { + // The ID of the current association between the original network ACL and the + // subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new network ACL to associate with the subnet. 
+ NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + metadataReplaceNetworkAclAssociationInput `json:"-" xml:"-"` +} + +type metadataReplaceNetworkAclAssociationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationOutput struct { + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` + + metadataReplaceNetworkAclAssociationOutput `json:"-" xml:"-"` +} + +type metadataReplaceNetworkAclAssociationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryInput struct { + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether to replace the egress rule. + // + // Default: If no value is specified, we replace the ingress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for + // the protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. Required if + // specifying 6 (TCP) or 17 (UDP) for the protocol. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The IP protocol. You can specify all or -1 to mean all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number of the entry to replace. 
+ RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` + + metadataReplaceNetworkAclEntryInput `json:"-" xml:"-"` +} + +type metadataReplaceNetworkAclEntryInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryOutput struct { + metadataReplaceNetworkAclEntryOutput `json:"-" xml:"-"` +} + +type metadataReplaceNetworkAclEntryOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type ReplaceRouteInput struct { + // The CIDR address block used for the destination match. The value you provide + // must match the CIDR of an existing route in the table. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` + + metadataReplaceRouteInput `json:"-" xml:"-"` +} + +type metadataReplaceRouteInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteInput) GoString() string { + return s.String() +} + +type ReplaceRouteOutput struct { + metadataReplaceRouteOutput `json:"-" xml:"-"` +} + +type metadataReplaceRouteOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteOutput) GoString() string { + return s.String() +} + +type ReplaceRouteTableAssociationInput struct { + // The association ID. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new route table to associate with the subnet. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + metadataReplaceRouteTableAssociationInput `json:"-" xml:"-"` +} + +type metadataReplaceRouteTableAssociationInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationInput) GoString() string { + return s.String() +} + +type ReplaceRouteTableAssociationOutput struct { + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` + + metadataReplaceRouteTableAssociationOutput `json:"-" xml:"-"` +} + +type metadataReplaceRouteTableAssociationOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationOutput) GoString() string { + return s.String() +} + +type ReportInstanceStatusInput struct { + // Descriptive text about the health state of your instance. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The time at which the reported instance health state ended. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more instances. + Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + // One or more reason codes that describes the health state of your instance. + // + // instance-stuck-in-state: My instance is stuck in a state. + // + // unresponsive: My instance is unresponsive. + // + // not-accepting-credentials: My instance is not accepting my credentials. + // + // password-not-available: A password is not available for my instance. + // + // performance-network: My instance is experiencing performance problems which + // I believe are network related. + // + // performance-instance-store: My instance is experiencing performance problems + // which I believe are related to the instance stores. + // + // performance-ebs-volume: My instance is experiencing performance problems + // which I believe are related to an EBS volume. + // + // performance-other: My instance is experiencing performance problems. + // + // other: [explain using the description parameter] + ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"` + + // The time at which the reported instance health state began. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The status of all instances listed. 
+ Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"` + + metadataReportInstanceStatusInput `json:"-" xml:"-"` +} + +type metadataReportInstanceStatusInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReportInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusInput) GoString() string { + return s.String() +} + +type ReportInstanceStatusOutput struct { + metadataReportInstanceStatusOutput `json:"-" xml:"-"` +} + +type metadataReportInstanceStatusOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReportInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotFleet. +type RequestSpotFleetInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The configuration for the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` + + metadataRequestSpotFleetInput `json:"-" xml:"-"` +} + +type metadataRequestSpotFleetInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RequestSpotFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotFleet. +type RequestSpotFleetOutput struct { + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + metadataRequestSpotFleetOutput `json:"-" xml:"-"` +} + +type metadataRequestSpotFleetOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RequestSpotFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotInstances. +type RequestSpotInstancesInput struct { + // The user-specified name for a logical grouping of bids. + // + // When you specify an Availability Zone group in a Spot Instance request, + // all Spot instances in the request are launched in the same Availability Zone. + // Instance proximity is maintained with this parameter, but the choice of Availability + // Zone is not. The group applies only to bids for Spot Instances of the same + // instance type. Any additional Spot instance requests that are specified with + // the same Availability Zone group name are launched in that same Availability + // Zone, as long as at least one instance from the group is still active. 
+ // + // If there is no active instance running in the Availability Zone group that + // you specify for a new Spot instance request (all instances are terminated, + // the bid is expired, or the bid falls below current market), then Amazon EC2 + // launches the instance in any Availability Zone where the constraint can be + // met. Consequently, the subsequent set of Spot instances could be placed in + // a different zone from the original request, even if you specified the same + // Availability Zone group. + // + // Default: Instances are launched in any available Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The required duration for the Spot instances, in minutes. This value must + // be a multiple of 60 (60, 120, 180, 240, 300, or 360). + // + // The duration period starts as soon as your Spot instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot instance + // for termination and provides a Spot instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // Note that you can't specify an Availability Zone group or a launch group + // if you specify a duration. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of Spot instances to launch. + // + // Default: 1 + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + // + // Default: Instances are launched and terminated individually + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Describes the launch specification for an instance. + LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` + + // The maximum hourly price (bid) for any Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The Spot instance request type. + // + // Default: one-time + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request. If this is a one-time request, the request + // becomes active at this date and time and remains active until all instances + // launch, the request expires, or the request is canceled. If the request is + // persistent, the request becomes active at this date and time and remains + // active until it expires or is canceled. + // + // Default: The request is effective indefinitely. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request. 
If this is a one-time request, the request remains + // active until all instances launch, the request is canceled, or this date + // is reached. If the request is persistent, it remains active until it is canceled + // or this date and time is reached. + // + // Default: The request is effective indefinitely. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + + metadataRequestSpotInstancesInput `json:"-" xml:"-"` +} + +type metadataRequestSpotInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RequestSpotInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotInstances. +type RequestSpotInstancesOutput struct { + // One or more Spot instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` + + metadataRequestSpotInstancesOutput `json:"-" xml:"-"` +} + +type metadataRequestSpotInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RequestSpotInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesOutput) GoString() string { + return s.String() +} + +// Describes the launch specification for an instance. +type RequestSpotLaunchSpecification struct { + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. 
+ RamdiskId *string `locationName:"ramdiskId" type:"string"` + + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` + + metadataRequestSpotLaunchSpecification `json:"-" xml:"-"` +} + +type metadataRequestSpotLaunchSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RequestSpotLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotLaunchSpecification) GoString() string { + return s.String() +} + +// Describes a reservation. +type Reservation struct { + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // One or more instances. + Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the reservation. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the requester that launched the instances on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // The ID of the reservation. + ReservationId *string `locationName:"reservationId" type:"string"` + + metadataReservation `json:"-" xml:"-"` +} + +type metadataReservation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Reservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Reservation) GoString() string { + return s.String() +} + +// Describes the limit price of a Reserved Instance offering. +type ReservedInstanceLimitPrice struct { + // Used for Reserved Instance Marketplace offerings. Specifies the limit price + // on the total order (instanceCount * price). + Amount *float64 `locationName:"amount" type:"double"` + + // The currency in which the limitPrice amount is specified. At this time, the + // only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + metadataReservedInstanceLimitPrice `json:"-" xml:"-"` +} + +type metadataReservedInstanceLimitPrice struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstanceLimitPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceLimitPrice) GoString() string { + return s.String() +} + +// Describes a Reserved Instance. +type ReservedInstances struct { + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance. It's specified using ISO 4217 standard + // currency codes. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. 
+ Duration *int64 `locationName:"duration" type:"long"` + + // The time when the Reserved Instance expires. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The number of Reserved Instances purchased. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The tenancy of the reserved instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The date and time the Reserved Instance started. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the Reserved Instance purchase. + State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` + + metadataReservedInstances `json:"-" xml:"-"` +} + +type metadataReservedInstances struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstances) GoString() string { + return s.String() +} + +// Describes the configuration settings for the modified Reserved Instances. +type ReservedInstancesConfiguration struct { + // The Availability Zone for the modified Reserved Instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of modified Reserved Instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type for the modified Reserved Instances. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The network platform of the modified Reserved Instances, which is either + // EC2-Classic or EC2-VPC. + Platform *string `locationName:"platform" type:"string"` + + metadataReservedInstancesConfiguration `json:"-" xml:"-"` +} + +type metadataReservedInstancesConfiguration struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesConfiguration) GoString() string { + return s.String() +} + +// Describes the ID of a Reserved Instance. +type ReservedInstancesId struct { + // The ID of the Reserved Instance. 
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + metadataReservedInstancesId `json:"-" xml:"-"` +} + +type metadataReservedInstancesId struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesId) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing. +type ReservedInstancesListing struct { + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time the listing was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The number of instances in this state. + InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` + + // The price of the Reserved Instance listing. + PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` + + // The status of the Reserved Instance listing. + Status *string `locationName:"status" type:"string" enum:"ListingStatus"` + + // The reason for the current status of the Reserved Instance listing. The response + // can be blank. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The last modified timestamp of the listing. + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` + + metadataReservedInstancesListing `json:"-" xml:"-"` +} + +type metadataReservedInstancesListing struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesListing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesListing) GoString() string { + return s.String() +} + +// Describes a Reserved Instance modification. +type ReservedInstancesModification struct { + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time when the modification request was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The time for the modification to become effective. + EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"` + + // Contains target configurations along with their corresponding new Reserved + // Instance IDs. + ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"` + + // The IDs of one or more Reserved Instances. 
+ ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` + + // A unique ID for the Reserved Instance modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` + + // The status of the Reserved Instances modification request. + Status *string `locationName:"status" type:"string"` + + // The reason for the status. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The time when the modification request was last updated. + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` + + metadataReservedInstancesModification `json:"-" xml:"-"` +} + +type metadataReservedInstancesModification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModification) GoString() string { + return s.String() +} + +type ReservedInstancesModificationResult struct { + // The ID for the Reserved Instances that were created as part of the modification + // request. This field is only available when the modification is fulfilled. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The target Reserved Instances configurations supplied as part of the modification + // request. + TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` + + metadataReservedInstancesModificationResult `json:"-" xml:"-"` +} + +type metadataReservedInstancesModificationResult struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModificationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModificationResult) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type ReservedInstancesOffering struct { + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance offering you are purchasing. It's specified + // using ISO 4217 standard currency codes. At this time, the only supported + // currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The tenancy of the reserved instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether the offering is available through the Reserved Instance + // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, + // this is true. + Marketplace *bool `locationName:"marketplace" type:"boolean"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The pricing details of the Reserved Instance offering. 
+ PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance offering. + ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` + + metadataReservedInstancesOffering `json:"-" xml:"-"` +} + +type metadataReservedInstancesOffering struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesOffering) GoString() string { + return s.String() +} + +type ResetImageAttributeInput struct { + // The attribute to reset (currently you can only reset the launch permission + // attribute). + Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + metadataResetImageAttributeInput `json:"-" xml:"-"` +} + +type metadataResetImageAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeInput) GoString() string { + return s.String() +} + +type ResetImageAttributeOutput struct { + metadataResetImageAttributeOutput `json:"-" xml:"-"` +} + +type metadataResetImageAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeOutput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeInput struct { + // The attribute to reset. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + metadataResetInstanceAttributeInput `json:"-" xml:"-"` +} + +type metadataResetInstanceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeInput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeOutput struct { + metadataResetInstanceAttributeOutput `json:"-" xml:"-"` +} + +type metadataResetInstanceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The source/destination checking attribute. Resets the value to true. + SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"` + + metadataResetNetworkInterfaceAttributeInput `json:"-" xml:"-"` +} + +type metadataResetNetworkInterfaceAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeOutput struct { + metadataResetNetworkInterfaceAttributeOutput `json:"-" xml:"-"` +} + +type metadataResetNetworkInterfaceAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeInput struct { + // The attribute to reset. Currently, only the attribute for permission to create + // volumes can be reset. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the snapshot. 
+ SnapshotId *string `type:"string" required:"true"` + + metadataResetSnapshotAttributeInput `json:"-" xml:"-"` +} + +type metadataResetSnapshotAttributeInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeInput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeOutput struct { + metadataResetSnapshotAttributeOutput `json:"-" xml:"-"` +} + +type metadataResetSnapshotAttributeOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` + + metadataRestoreAddressToClassicInput `json:"-" xml:"-"` +} + +type metadataRestoreAddressToClassicInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RestoreAddressToClassicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicInput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicOutput struct { + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The move status for the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` + + metadataRestoreAddressToClassicOutput `json:"-" xml:"-"` +} + +type metadataRestoreAddressToClassicOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RestoreAddressToClassicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressInput struct { + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. 
+ IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To revoke outbound access to a + // destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. To revoke outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `locationName:"toPort" type:"integer"` + + metadataRevokeSecurityGroupEgressInput `json:"-" xml:"-"` +} + +type metadataRevokeSecurityGroupEgressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressOutput struct { + metadataRevokeSecurityGroupEgressOutput `json:"-" xml:"-"` +} + +type metadataRevokeSecurityGroupEgressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressInput struct { + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a security group in a nondefault + // VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. You can't specify a source security group and a + // CIDR IP address range. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. 
You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic, default VPC] The AWS account ID of the source security group. + // For EC2-VPC, the source security group must be in the same VPC. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the IP protocol, the start of the port range, and + // the end of the port range. To revoke a specific rule for an IP protocol and + // port range, use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` + + metadataRevokeSecurityGroupIngressInput `json:"-" xml:"-"` +} + +type metadataRevokeSecurityGroupIngressInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressOutput struct { + metadataRevokeSecurityGroupIngressOutput `json:"-" xml:"-"` +} + +type metadataRevokeSecurityGroupIngressOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes a route in a route table. +type Route struct { + // The CIDR block used for the destination match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The prefix of the AWS service. + DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"` + + // The ID of a gateway attached to your VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Describes how the route was created. + // + // CreateRouteTable indicates that route was automatically created when the + // route table was created. CreateRoute indicates that the route was manually + // added to the route table. EnableVgwRoutePropagation indicates that the route + // was propagated by route propagation. + Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"` + + // The state of the route. The blackhole state indicates that the route's target + // isn't available (for example, the specified gateway isn't attached to the + // VPC, or the specified NAT instance has been terminated). + State *string `locationName:"state" type:"string" enum:"RouteState"` + + // The ID of the VPC peering connection. 
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` + + metadataRoute `json:"-" xml:"-"` +} + +type metadataRoute struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Route) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Route) GoString() string { + return s.String() +} + +// Describes a route table. +type RouteTable struct { + // The associations between the route table and one or more subnets. + Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // Any virtual private gateway (VGW) propagating routes. + PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The routes in the route table. + Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataRouteTable `json:"-" xml:"-"` +} + +type metadataRouteTable struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTable) GoString() string { + return s.String() +} + +// Describes an association between a route table and a subnet. +type RouteTableAssociation struct { + // Indicates whether this is the main route table. + Main *bool `locationName:"main" type:"boolean"` + + // The ID of the association between a route table and a subnet. + RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The ID of the subnet. A subnet ID is not returned for an implicit association. + SubnetId *string `locationName:"subnetId" type:"string"` + + metadataRouteTableAssociation `json:"-" xml:"-"` +} + +type metadataRouteTableAssociation struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociation) GoString() string { + return s.String() +} + +type RunInstancesInput struct { + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The block device mapping. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. 
If you set this + // parameter to true and then later want to be able to terminate the instance, + // you must first change the value of the disableApiTermination attribute to + // false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior + // to terminate, you can terminate the instance by running the shutdown command + // from the instance. + // + // Default: false + DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI, which you can get by calling DescribeImages. + ImageId *string `type:"string" required:"true"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: m1.small + InstanceType *string `type:"string" enum:"InstanceType"` + + // The ID of the kernel. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair or + // ImportKeyPair. + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The maximum number of instances to launch. If you specify more instances + // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + // the largest possible number of instances above MinCount. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MaxCount *int64 `type:"integer" required:"true"` + + // The minimum number of instances to launch. 
If you specify a minimum that + // is more instances than Amazon EC2 can launch in the target Availability Zone, + // Amazon EC2 launches no instances. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MinCount *int64 `type:"integer" required:"true"` + + // The monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` + + // The placement for the instance. + Placement *Placement `type:"structure"` + + // [EC2-VPC] The primary IP address. You must specify a value from the IP address + // range of the subnet. + // + // Only one private IP address can be designated as primary. Therefore, you + // can't specify this parameter if PrivateIpAddresses.n.Primary is set to true + // and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address. + // + // Default: We select an IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The ID of the RAM disk. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + RamdiskId *string `type:"string"` + + // One or more security group IDs. You can create a security group using CreateSecurityGroup. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // [EC2-Classic, default VPC] One or more security group names. For a nondefault + // VPC, you must use security group IDs instead. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` + + // [EC2-VPC] The ID of the subnet to launch the instance into. + SubnetId *string `type:"string"` + + // The Base64-encoded MIME user data for the instances. + UserData *string `type:"string"` + + metadataRunInstancesInput `json:"-" xml:"-"` +} + +type metadataRunInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RunInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesInput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type RunInstancesMonitoringEnabled struct { + // Indicates whether monitoring is enabled for the instance. 
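+	//
+	// A minimal usage sketch for RunInstancesInput (defined above), assuming an
+	// EC2 client value svc (the exact ec2.New constructor signature varies by
+	// SDK snapshot) and a hypothetical AMI ID:
+	//
+	//	ami, one := "ami-12345678", int64(1)
+	//	resp, err := svc.RunInstances(&ec2.RunInstancesInput{
+	//		ImageId:  &ami, // required
+	//		MinCount: &one, // required
+	//		MaxCount: &one, // required
+	//	})
+	//	if err != nil {
+	//		// handle error
+	//	}
+	//	_ = resp
+	//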
+ Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + metadataRunInstancesMonitoringEnabled `json:"-" xml:"-"` +} + +type metadataRunInstancesMonitoringEnabled struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s RunInstancesMonitoringEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesMonitoringEnabled) GoString() string { + return s.String() +} + +// Describes the storage parameters for S3 and S3 buckets for an instance store-backed +// AMI. +type S3Storage struct { + // The access key ID of the owner of the bucket. Before you specify a value + // for your access key ID, review and follow the guidance in Best Practices + // for Managing AWS Access Keys (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + AWSAccessKeyId *string `type:"string"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + Bucket *string `locationName:"bucket" type:"string"` + + // The beginning of the file name of the AMI. + Prefix *string `locationName:"prefix" type:"string"` + + // A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission + // to upload items into Amazon S3 on your behalf. + UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"` + + // The signature of the Base64 encoded JSON document. + UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"` + + metadataS3Storage `json:"-" xml:"-"` +} + +type metadataS3Storage struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s S3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Storage) GoString() string { + return s.String() +} + +// Describes a security group +type SecurityGroup struct { + // A description of the security group. + Description *string `locationName:"groupDescription" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // One or more inbound rules associated with the security group. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // [EC2-VPC] One or more outbound rules associated with the security group. + IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // [EC2-VPC] The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataSecurityGroup `json:"-" xml:"-"` +} + +type metadataSecurityGroup struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroup) GoString() string { + return s.String() +} + +// Describes a snapshot. 
+type Snapshot struct { + // The data encryption key identifier for the snapshot. This value is a unique + // identifier that corresponds to the data encryption key that was used to encrypt + // the original volume or snapshot copy. Because data encryption keys are inherited + // by volumes created from snapshots, and vice versa, if snapshots share the + // same data encryption key identifier, then they belong to the same volume/snapshot + // lineage. This parameter is only returned by the DescribeSnapshots API operation. + DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` + + // The description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the parent + // volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The AWS account alias (for example, amazon, self) or AWS account ID that + // owns the snapshot. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the EBS snapshot owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The progress of the snapshot, as a percentage. + Progress *string `locationName:"progress" type:"string"` + + // The ID of the snapshot. Each snapshot receives a unique identifier when it + // is created. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The time stamp when the snapshot was initiated. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot state. + State *string `locationName:"status" type:"string" enum:"SnapshotState"` + + // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy + // operation fails (for example, if the proper AWS Key Management Service (AWS + // KMS) permissions are not obtained) this field displays error state details + // to help you diagnose why the error occurred. This parameter is only returned + // by the DescribeSnapshots API operation. + StateMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume that was used to create the snapshot. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The size of the volume, in GiB. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + metadataSnapshot `json:"-" xml:"-"` +} + +type metadataSnapshot struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Describes the snapshot created from the imported disk. +type SnapshotDetail struct { + // A description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // The block device mapping for the snapshot. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. 
+ Format *string `locationName:"format" type:"string"` + + // The percentage of progress for the task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status of the snapshot creation. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the snapshot creation. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL used to access the disk image. + Url *string `locationName:"url" type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` + + metadataSnapshotDetail `json:"-" xml:"-"` +} + +type metadataSnapshotDetail struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SnapshotDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDetail) GoString() string { + return s.String() +} + +// The disk container object for the import snapshot request. +type SnapshotDiskContainer struct { + // The description of the disk image being imported. + Description *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. It can either be + // a https URL (https://..) or an Amazon S3 URL (s3://..). + Url *string `type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` + + metadataSnapshotDiskContainer `json:"-" xml:"-"` +} + +type metadataSnapshotDiskContainer struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SnapshotDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDiskContainer) GoString() string { + return s.String() +} + +// Details about the import snapshot task. +type SnapshotTaskDetail struct { + // The description of the snapshot. + Description *string `locationName:"description" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of completion for the import snapshot task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status for the import snapshot task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the import snapshot task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL of the disk image from which the snapshot is created. + Url *string `locationName:"url" type:"string"` + + // The S3 bucket for the disk image. 
+ UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` + + metadataSnapshotTaskDetail `json:"-" xml:"-"` +} + +type metadataSnapshotTaskDetail struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SnapshotTaskDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotTaskDetail) GoString() string { + return s.String() +} + +// Describes the data feed for a Spot instance. +type SpotDatafeedSubscription struct { + // The Amazon S3 bucket where the Spot instance data feed is located. + Bucket *string `locationName:"bucket" type:"string"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The AWS account ID of the account. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The prefix that is prepended to data feed files. + Prefix *string `locationName:"prefix" type:"string"` + + // The state of the Spot instance data feed subscription. + State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"` + + metadataSpotDatafeedSubscription `json:"-" xml:"-"` +} + +type metadataSpotDatafeedSubscription struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotDatafeedSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotDatafeedSubscription) GoString() string { + return s.String() +} + +// Describes the launch specification for one or more Spot instances. +type SpotFleetLaunchSpecification struct { + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. 
When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The bid price per unit hour for the specified instance type. If this value + // is not specified, the default is the Spot bid price specified for the fleet. + // To determine the bid price per unit hour, divide the Spot bid price by the + // value of WeightedCapacity. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` + + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms (instances + // or a performance characteristic such as vCPUs, memory, or I/O). + // + // If the target capacity divided by this value is not a whole number, we round + // the number of instances to the next whole number. If this value is not specified, + // the default is 1. + WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` + + metadataSpotFleetLaunchSpecification `json:"-" xml:"-"` +} + +type metadataSpotFleetLaunchSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotFleetLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled. +type SpotFleetMonitoring struct { + // Enables monitoring for the instance. + // + // Default: false + Enabled *bool `locationName:"enabled" type:"boolean"` + + metadataSpotFleetMonitoring `json:"-" xml:"-"` +} + +type metadataSpotFleetMonitoring struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotFleetMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetMonitoring) GoString() string { + return s.String() +} + +// Describes a Spot fleet request. +type SpotFleetRequestConfig struct { + // The creation date and time of the request. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Information about the configuration of the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The state of the Spot fleet request. + SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + metadataSpotFleetRequestConfig `json:"-" xml:"-"` +} + +type metadataSpotFleetRequestConfig struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotFleetRequestConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfig) GoString() string { + return s.String() +} + +// Describes the configuration of a Spot fleet request. 
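+// A minimal usage sketch for the SpotFleetRequestConfigData shape defined below,
+// assuming the RequestSpotFleet operation and its input shape are present in this
+// SDK snapshot, with svc an EC2 client value and the role ARN, bid price, target
+// capacity, and launch specification placeholders supplied by the caller:
+//
+//	cfg := &ec2.SpotFleetRequestConfigData{
+//		IamFleetRole:         &fleetRoleARN, // required
+//		SpotPrice:            &bidPrice,     // required
+//		TargetCapacity:       &capacity,     // required
+//		LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{spec}, // required
+//	}
+//	resp, err := svc.RequestSpotFleet(&ec2.RequestSpotFleetInput{
+//		SpotFleetRequestConfig: cfg,
+//	})
+//	_, _ = resp, err
+//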
+type SpotFleetRequestConfigData struct { + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot fleet request. The default is lowestPrice. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` + + // A unique, case-sensitive identifier you provide to ensure idempotency of + // your listings. This helps avoid duplicate listings. For more information, + // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // Grants the Spot fleet permission to terminate Spot instances on your behalf + // when you cancel its Spot fleet request using CancelSpotFleetRequests or when + // the Spot fleet request expires, if you set terminateInstancesWithExpiration. + IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + + // Information about the launch specifications for the Spot fleet request. + LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"` + + // The bid price per unit hour. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` + + // Indicates whether running Spot instances should be terminated when the Spot + // fleet request expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new Spot instance requests are placed or enabled to fulfill + // the request. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + + metadataSpotFleetRequestConfigData `json:"-" xml:"-"` +} + +type metadataSpotFleetRequestConfigData struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotFleetRequestConfigData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfigData) GoString() string { + return s.String() +} + +// Describes a Spot instance request. +type SpotInstanceRequest struct { + // If you specified a duration and your Spot instance request was fulfilled, + // this is the fixed hourly price in effect for the Spot instance while it runs. + ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` + + // The Availability Zone group. 
If you specify the same Availability Zone group + // for all Spot instance requests, all Spot instances are launched in the same + // Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The duration for the Spot instance, in minutes. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // The date and time when the Spot instance request was created, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The instance ID, if an instance has been launched to fulfill the Spot instance + // request. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Additional information for launching instances. + LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The Availability Zone in which the bid is launched. + LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"` + + // The product description associated with the Spot instance. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The maximum hourly price (bid) for the Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The state of the Spot instance request. Spot bid status information can help + // you track your Spot instance requests. For more information, see Spot Bid + // Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` + + // The status code and status message describing the Spot instance request. + Status *SpotInstanceStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Spot instance request type. + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The request becomes active at this date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // If this is a one-time request, it remains active until all instances launch, + // the request is canceled, or this date is reached. If the request is persistent, + // it remains active until it is canceled or this date is reached. 
+ ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + + metadataSpotInstanceRequest `json:"-" xml:"-"` +} + +type metadataSpotInstanceRequest struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes a Spot instance state change. +type SpotInstanceStateFault struct { + // The reason code for the Spot instance state change. + Code *string `locationName:"code" type:"string"` + + // The message for the Spot instance state change. + Message *string `locationName:"message" type:"string"` + + metadataSpotInstanceStateFault `json:"-" xml:"-"` +} + +type metadataSpotInstanceStateFault struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotInstanceStateFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStateFault) GoString() string { + return s.String() +} + +// Describes the status of a Spot instance request. +type SpotInstanceStatus struct { + // The status code. For a list of status codes, see Spot Bid Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // in the Amazon Elastic Compute Cloud User Guide. + Code *string `locationName:"code" type:"string"` + + // The description for the status code. + Message *string `locationName:"message" type:"string"` + + // The date and time of the most recent status update, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` + + metadataSpotInstanceStatus `json:"-" xml:"-"` +} + +type metadataSpotInstanceStatus struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotInstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStatus) GoString() string { + return s.String() +} + +// Describes Spot instance placement. +type SpotPlacement struct { + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group (for cluster instances). + GroupName *string `locationName:"groupName" type:"string"` + + metadataSpotPlacement `json:"-" xml:"-"` +} + +type metadataSpotPlacement struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPlacement) GoString() string { + return s.String() +} + +// Describes the maximum hourly price (bid) for any Spot instance launched to +// fulfill the request. +type SpotPrice struct { + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // A general description of the AMI. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The maximum price (bid) that you are willing to pay for a Spot instance. 
+ SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + + metadataSpotPrice `json:"-" xml:"-"` +} + +type metadataSpotPrice struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s SpotPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPrice) GoString() string { + return s.String() +} + +type StartInstancesInput struct { + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataStartInstancesInput `json:"-" xml:"-"` +} + +type metadataStartInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StartInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesInput) GoString() string { + return s.String() +} + +type StartInstancesOutput struct { + // Information about one or more started instances. + StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` + + metadataStartInstancesOutput `json:"-" xml:"-"` +} + +type metadataStartInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StartInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesOutput) GoString() string { + return s.String() +} + +// Describes a state change. +type StateReason struct { + // The reason code for the state change. + Code *string `locationName:"code" type:"string"` + + // The message for the state change. + // + // Server.SpotInstanceTermination: A Spot Instance was terminated due to an + // increase in the market price. + // + // Server.InternalError: An internal error occurred during instance launch, + // resulting in termination. + // + // Server.InsufficientInstanceCapacity: There was insufficient instance capacity + // to satisfy the launch request. + // + // Client.InternalError: A client error caused the instance to terminate on + // launch. + // + // Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown + // -h command from the instance. + // + // Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // EC2 API. + // + // Client.VolumeLimitExceeded: The volume limit was exceeded. + // + // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. 
+ Message *string `locationName:"message" type:"string"` + + metadataStateReason `json:"-" xml:"-"` +} + +type metadataStateReason struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StateReason) GoString() string { + return s.String() +} + +type StopInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces the instances to stop. The instances do not have an opportunity to + // flush file system caches or file system metadata. If you use this option, + // you must perform file system check and repair procedures. This option is + // not recommended for Windows instances. + // + // Default: false + Force *bool `locationName:"force" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataStopInstancesInput `json:"-" xml:"-"` +} + +type metadataStopInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StopInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesInput) GoString() string { + return s.String() +} + +type StopInstancesOutput struct { + // Information about one or more stopped instances. + StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` + + metadataStopInstancesOutput `json:"-" xml:"-"` +} + +type metadataStopInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s StopInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesOutput) GoString() string { + return s.String() +} + +// Describes the storage location for an instance store-backed AMI. +type Storage struct { + // An Amazon S3 storage location. + S3 *S3Storage `type:"structure"` + + metadataStorage `json:"-" xml:"-"` +} + +type metadataStorage struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Storage) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + // The Availability Zone of the subnet. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of unused IP addresses in the subnet. Note that the IP addresses + // for any stopped instances are considered unavailable. + AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` + + // The CIDR block assigned to the subnet. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether this is the default subnet for the Availability Zone. + DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` + + // Indicates whether instances launched in this subnet receive a public IP address. 
+ MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` + + // The current state of the subnet. + State *string `locationName:"state" type:"string" enum:"SubnetState"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the subnet. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC the subnet is in. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataSubnet `json:"-" xml:"-"` +} + +type metadataSubnet struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Describes a tag. +type Tag struct { + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode + // characters. May not begin with aws: + Key *string `locationName:"key" type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode + // characters. + Value *string `locationName:"value" type:"string"` + + metadataTag `json:"-" xml:"-"` +} + +type metadataTag struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag. +type TagDescription struct { + // The tag key. + Key *string `locationName:"key" type:"string"` + + // The ID of the resource. For example, ami-1a2b3c4d. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tag value. + Value *string `locationName:"value" type:"string"` + + metadataTagDescription `json:"-" xml:"-"` +} + +type metadataTagDescription struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataTerminateInstancesInput `json:"-" xml:"-"` +} + +type metadataTerminateInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s TerminateInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesInput) GoString() string { + return s.String() +} + +type TerminateInstancesOutput struct { + // Information about one or more terminated instances. 
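+	//
+	// A minimal usage sketch for the stop and terminate request shapes defined
+	// above, assuming an EC2 client value svc and a hypothetical instance ID:
+	//
+	//	id := "i-12345678"
+	//	ids := []*string{&id}
+	//	if _, err := svc.StopInstances(&ec2.StopInstancesInput{InstanceIds: ids}); err != nil {
+	//		// handle error
+	//	}
+	//	if _, err := svc.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: ids}); err != nil {
+	//		// handle error
+	//	}
+	//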
+ TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` + + metadataTerminateInstancesOutput `json:"-" xml:"-"` +} + +type metadataTerminateInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s TerminateInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesOutput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesInput struct { + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The secondary private IP addresses to unassign from the network interface. + // You can specify this option multiple times to unassign more than one IP address. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"` + + metadataUnassignPrivateIpAddressesInput `json:"-" xml:"-"` +} + +type metadataUnassignPrivateIpAddressesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesOutput struct { + metadataUnassignPrivateIpAddressesOutput `json:"-" xml:"-"` +} + +type metadataUnassignPrivateIpAddressesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type UnmonitorInstancesInput struct { + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + metadataUnmonitorInstancesInput `json:"-" xml:"-"` +} + +type metadataUnmonitorInstancesInput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnmonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesInput) GoString() string { + return s.String() +} + +type UnmonitorInstancesOutput struct { + // Monitoring information for one or more instances. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` + + metadataUnmonitorInstancesOutput `json:"-" xml:"-"` +} + +type metadataUnmonitorInstancesOutput struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnmonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesOutput) GoString() string { + return s.String() +} + +// Information about items that were not successfully processed in a batch call. 
+type UnsuccessfulItem struct { + // Information about the error. + Error *UnsuccessfulItemError `locationName:"error" type:"structure" required:"true"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + metadataUnsuccessfulItem `json:"-" xml:"-"` +} + +type metadataUnsuccessfulItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnsuccessfulItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItem) GoString() string { + return s.String() +} + +// Information about the error that occurred. For more information about errors, +// see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). +type UnsuccessfulItemError struct { + // The error code. + Code *string `locationName:"code" type:"string" required:"true"` + + // The error message accompanying the error code. + Message *string `locationName:"message" type:"string" required:"true"` + + metadataUnsuccessfulItemError `json:"-" xml:"-"` +} + +type metadataUnsuccessfulItemError struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UnsuccessfulItemError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItemError) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucket struct { + // The name of the S3 bucket where the disk image is located. + S3Bucket *string `type:"string"` + + // The key for the disk image. + S3Key *string `type:"string"` + + metadataUserBucket `json:"-" xml:"-"` +} + +type metadataUserBucket struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UserBucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucket) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucketDetails struct { + // The S3 bucket from which the disk image was created. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The key from which the disk image was created. + S3Key *string `locationName:"s3Key" type:"string"` + + metadataUserBucketDetails `json:"-" xml:"-"` +} + +type metadataUserBucketDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UserBucketDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucketDetails) GoString() string { + return s.String() +} + +// Describes the user data to be made available to an instance. +type UserData struct { + // The Base64-encoded MIME user data for the instance. + Data *string `locationName:"data" type:"string"` + + metadataUserData `json:"-" xml:"-"` +} + +type metadataUserData struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UserData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserData) GoString() string { + return s.String() +} + +// Describes a security group and AWS account ID pair. +type UserIdGroupPair struct { + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. 
In a request, use this parameter for a security + // group in EC2-Classic or a default VPC only. For a security group in a nondefault + // VPC, use GroupId. + GroupName *string `locationName:"groupName" type:"string"` + + // The ID of an AWS account. EC2-Classic only. + UserId *string `locationName:"userId" type:"string"` + + metadataUserIdGroupPair `json:"-" xml:"-"` +} + +type metadataUserIdGroupPair struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s UserIdGroupPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserIdGroupPair) GoString() string { + return s.String() +} + +// Describes telemetry for a VPN tunnel. +type VgwTelemetry struct { + // The number of accepted routes. + AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + + // The date and time of the last change in status. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"` + + // The Internet-routable IP address of the virtual private gateway's outside + // interface. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The status of the VPN tunnel. + Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"` + + // If an error occurs, a description of the error. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + metadataVgwTelemetry `json:"-" xml:"-"` +} + +type metadataVgwTelemetry struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VgwTelemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VgwTelemetry) GoString() string { + return s.String() +} + +// Describes a volume. +type Volume struct { + // Information about the volume attachments. + Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The Availability Zone for the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time stamp when volume creation was initiated. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume will be encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the volume. 
+ KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The size of the volume, in GiBs. + Size *int64 `locationName:"size" type:"integer"` + + // The snapshot from which the volume was created, if applicable. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The volume state. + State *string `locationName:"status" type:"string" enum:"VolumeState"` + + // Any tags assigned to the volume. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` + + metadataVolume `json:"-" xml:"-"` +} + +type metadataVolume struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes volume attachment details. +type VolumeAttachment struct { + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device name. + Device *string `locationName:"device" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The attachment state of the volume. + State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + metadataVolumeAttachment `json:"-" xml:"-"` +} + +type metadataVolumeAttachment struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeAttachment) GoString() string { + return s.String() +} + +// Describes an EBS volume. +type VolumeDetail struct { + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` + + metadataVolumeDetail `json:"-" xml:"-"` +} + +type metadataVolumeDetail struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeDetail) GoString() string { + return s.String() +} + +// Describes a volume status operation code. +type VolumeStatusAction struct { + // The code identifying the operation, for example, enable-volume-io. + Code *string `locationName:"code" type:"string"` + + // A description of the operation. + Description *string `locationName:"description" type:"string"` + + // The ID of the event associated with this operation. + EventId *string `locationName:"eventId" type:"string"` + + // The event type associated with this operation. 
+ EventType *string `locationName:"eventType" type:"string"` + + metadataVolumeStatusAction `json:"-" xml:"-"` +} + +type metadataVolumeStatusAction struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAction) GoString() string { + return s.String() +} + +// Describes a volume status. +type VolumeStatusDetails struct { + // The name of the volume status. + Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"` + + // The intended status of the volume status. + Status *string `locationName:"status" type:"string"` + + metadataVolumeStatusDetails `json:"-" xml:"-"` +} + +type metadataVolumeStatusDetails struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusDetails) GoString() string { + return s.String() +} + +// Describes a volume status event. +type VolumeStatusEvent struct { + // A description of the event. + Description *string `locationName:"description" type:"string"` + + // The ID of this event. + EventId *string `locationName:"eventId" type:"string"` + + // The type of this event. + EventType *string `locationName:"eventType" type:"string"` + + // The latest end time of the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest start time of the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` + + metadataVolumeStatusEvent `json:"-" xml:"-"` +} + +type metadataVolumeStatusEvent struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of a volume. +type VolumeStatusInfo struct { + // The details of the volume status. + Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status of the volume. + Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"` + + metadataVolumeStatusInfo `json:"-" xml:"-"` +} + +type metadataVolumeStatusInfo struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusInfo) GoString() string { + return s.String() +} + +// Describes the volume status. +type VolumeStatusItem struct { + // The details of the operation. + Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + + // The Availability Zone of the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A list of events associated with the volume. + Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The volume ID. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume status. 
+ VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"` + + metadataVolumeStatusItem `json:"-" xml:"-"` +} + +type metadataVolumeStatusItem struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusItem) GoString() string { + return s.String() +} + +// Describes a VPC. +type Vpc struct { + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The ID of the set of DHCP options you've associated with the VPC (or default + // if the default options are associated with the VPC). + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The allowed tenancy of instances launched into the VPC. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // Indicates whether the VPC is the default VPC. + IsDefault *bool `locationName:"isDefault" type:"boolean"` + + // The current state of the VPC. + State *string `locationName:"state" type:"string" enum:"VpcState"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataVpc `json:"-" xml:"-"` +} + +type metadataVpc struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s Vpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vpc) GoString() string { + return s.String() +} + +// Describes an attachment between a virtual private gateway and a VPC. +type VpcAttachment struct { + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataVpcAttachment `json:"-" xml:"-"` +} + +type metadataVpcAttachment struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcAttachment) GoString() string { + return s.String() +} + +// Describes whether a VPC is enabled for ClassicLink. +type VpcClassicLink struct { + // Indicates whether the VPC is enabled for ClassicLink. + ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataVpcClassicLink `json:"-" xml:"-"` +} + +type metadataVpcClassicLink struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcClassicLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcClassicLink) GoString() string { + return s.String() +} + +// Describes a VPC endpoint. +type VpcEndpoint struct { + // The date and time the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + + // The policy document associated with the endpoint. 
+ PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // One or more route tables associated with the endpoint. + RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"` + + // The name of the AWS service to which the endpoint is associated. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The state of the VPC endpoint. + State *string `locationName:"state" type:"string" enum:"State"` + + // The ID of the VPC endpoint. + VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The ID of the VPC to which the endpoint is associated. + VpcId *string `locationName:"vpcId" type:"string"` + + metadataVpcEndpoint `json:"-" xml:"-"` +} + +type metadataVpcEndpoint struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpoint) GoString() string { + return s.String() +} + +// Describes a VPC peering connection. +type VpcPeeringConnection struct { + // The information of the peer VPC. + AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` + + // The time that an unaccepted VPC peering connection will expire. + ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The information of the requester VPC. + RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"` + + // The status of the VPC peering connection. + Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` + + metadataVpcPeeringConnection `json:"-" xml:"-"` +} + +type metadataVpcPeeringConnection struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcPeeringConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnection) GoString() string { + return s.String() +} + +// Describes the status of a VPC peering connection. +type VpcPeeringConnectionStateReason struct { + // The status of the VPC peering connection. + Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"` + + // A message that provides more information about the status, if applicable. + Message *string `locationName:"message" type:"string"` + + metadataVpcPeeringConnectionStateReason `json:"-" xml:"-"` +} + +type metadataVpcPeeringConnectionStateReason struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcPeeringConnectionStateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionStateReason) GoString() string { + return s.String() +} + +// Describes a VPC in a VPC peering connection. +type VpcPeeringConnectionVpcInfo struct { + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The AWS account ID of the VPC owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` + + metadataVpcPeeringConnectionVpcInfo `json:"-" xml:"-"` +} + +type metadataVpcPeeringConnectionVpcInfo struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpcPeeringConnectionVpcInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionVpcInfo) GoString() string { + return s.String() +} + +// Describes a VPN connection. +type VpnConnection struct { + // The configuration information for the VPN connection's customer gateway (in + // the native XML format). This element is always present in the CreateVpnConnection + // response; however, it's present in the DescribeVpnConnections response only + // if the VPN connection is in the pending or available state. + CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"` + + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The VPN connection options. + Options *VpnConnectionOptions `locationName:"options" type:"structure"` + + // The static routes associated with the VPN connection. + Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"` + + // The current state of the VPN connection. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the VPN connection. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Information about the VPN tunnel. + VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"` + + // The ID of the VPN connection. + VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"` + + // The ID of the virtual private gateway at the AWS side of the VPN connection. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` + + metadataVpnConnection `json:"-" xml:"-"` +} + +type metadataVpnConnection struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpnConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnection) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptions struct { + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + metadataVpnConnectionOptions `json:"-" xml:"-"` +} + +type metadataVpnConnectionOptions struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpnConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptions) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptionsSpecification struct { + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. 
+ StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + metadataVpnConnectionOptionsSpecification `json:"-" xml:"-"` +} + +type metadataVpnConnectionOptionsSpecification struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpnConnectionOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptionsSpecification) GoString() string { + return s.String() +} + +// Describes a virtual private gateway. +type VpnGateway struct { + // The Availability Zone where the virtual private gateway was created. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The current state of the virtual private gateway. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the virtual private gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the virtual private gateway supports. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Any VPCs attached to the virtual private gateway. + VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` + + metadataVpnGateway `json:"-" xml:"-"` +} + +type metadataVpnGateway struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpnGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnGateway) GoString() string { + return s.String() +} + +// Describes a static route for a VPN connection. +type VpnStaticRoute struct { + // The CIDR block associated with the local subnet of the customer data center. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // Indicates how the routes were provided. + Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"` + + // The current state of the static route. 
+ State *string `locationName:"state" type:"string" enum:"VpnState"` + + metadataVpnStaticRoute `json:"-" xml:"-"` +} + +type metadataVpnStaticRoute struct { + SDKShapeTraits bool `type:"structure"` +} + +// String returns the string representation +func (s VpnStaticRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnStaticRoute) GoString() string { + return s.String() +} + +const ( + // @enum AccountAttributeName + AccountAttributeNameSupportedPlatforms = "supported-platforms" + // @enum AccountAttributeName + AccountAttributeNameDefaultVpc = "default-vpc" +) + +const ( + // @enum AllocationStrategy + AllocationStrategyLowestPrice = "lowestPrice" + // @enum AllocationStrategy + AllocationStrategyDiversified = "diversified" +) + +const ( + // @enum ArchitectureValues + ArchitectureValuesI386 = "i386" + // @enum ArchitectureValues + ArchitectureValuesX8664 = "x86_64" +) + +const ( + // @enum AttachmentStatus + AttachmentStatusAttaching = "attaching" + // @enum AttachmentStatus + AttachmentStatusAttached = "attached" + // @enum AttachmentStatus + AttachmentStatusDetaching = "detaching" + // @enum AttachmentStatus + AttachmentStatusDetached = "detached" +) + +const ( + // @enum AvailabilityZoneState + AvailabilityZoneStateAvailable = "available" + // @enum AvailabilityZoneState + AvailabilityZoneStateInformation = "information" + // @enum AvailabilityZoneState + AvailabilityZoneStateImpaired = "impaired" + // @enum AvailabilityZoneState + AvailabilityZoneStateUnavailable = "unavailable" +) + +const ( + // @enum BatchState + BatchStateSubmitted = "submitted" + // @enum BatchState + BatchStateActive = "active" + // @enum BatchState + BatchStateCancelled = "cancelled" + // @enum BatchState + BatchStateFailed = "failed" + // @enum BatchState + BatchStateCancelledRunning = "cancelled_running" + // @enum BatchState + BatchStateCancelledTerminating = "cancelled_terminating" + // @enum BatchState + BatchStateModifying = "modifying" +) + +const ( + // @enum BundleTaskState + BundleTaskStatePending = "pending" + // @enum BundleTaskState + BundleTaskStateWaitingForShutdown = "waiting-for-shutdown" + // @enum BundleTaskState + BundleTaskStateBundling = "bundling" + // @enum BundleTaskState + BundleTaskStateStoring = "storing" + // @enum BundleTaskState + BundleTaskStateCancelling = "cancelling" + // @enum BundleTaskState + BundleTaskStateComplete = "complete" + // @enum BundleTaskState + BundleTaskStateFailed = "failed" +) + +const ( + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeUnexpectedError = "unexpectedError" +) + +const ( + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateActive = "active" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateOpen = "open" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateClosed = "closed" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCancelled = "cancelled" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCompleted = "completed" +) + +const ( + // @enum ContainerFormat + ContainerFormatOva = "ova" +) + +const ( + // @enum 
ConversionTaskState + ConversionTaskStateActive = "active" + // @enum ConversionTaskState + ConversionTaskStateCancelling = "cancelling" + // @enum ConversionTaskState + ConversionTaskStateCancelled = "cancelled" + // @enum ConversionTaskState + ConversionTaskStateCompleted = "completed" +) + +const ( + // @enum CurrencyCodeValues + CurrencyCodeValuesUsd = "USD" +) + +const ( + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateActive = "Active" + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateInactive = "Inactive" +) + +const ( + // @enum DeviceType + DeviceTypeEbs = "ebs" + // @enum DeviceType + DeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum DiskImageFormat + DiskImageFormatVmdk = "VMDK" + // @enum DiskImageFormat + DiskImageFormatRaw = "RAW" + // @enum DiskImageFormat + DiskImageFormatVhd = "VHD" +) + +const ( + // @enum DomainType + DomainTypeVpc = "vpc" + // @enum DomainType + DomainTypeStandard = "standard" +) + +const ( + // @enum EventCode + EventCodeInstanceReboot = "instance-reboot" + // @enum EventCode + EventCodeSystemReboot = "system-reboot" + // @enum EventCode + EventCodeSystemMaintenance = "system-maintenance" + // @enum EventCode + EventCodeInstanceRetirement = "instance-retirement" + // @enum EventCode + EventCodeInstanceStop = "instance-stop" +) + +const ( + // @enum EventType + EventTypeInstanceChange = "instanceChange" + // @enum EventType + EventTypeFleetRequestChange = "fleetRequestChange" + // @enum EventType + EventTypeError = "error" +) + +const ( + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyDefault = "default" +) + +const ( + // @enum ExportEnvironment + ExportEnvironmentCitrix = "citrix" + // @enum ExportEnvironment + ExportEnvironmentVmware = "vmware" + // @enum ExportEnvironment + ExportEnvironmentMicrosoft = "microsoft" +) + +const ( + // @enum ExportTaskState + ExportTaskStateActive = "active" + // @enum ExportTaskState + ExportTaskStateCancelling = "cancelling" + // @enum ExportTaskState + ExportTaskStateCancelled = "cancelled" + // @enum ExportTaskState + ExportTaskStateCompleted = "completed" +) + +const ( + // @enum FlowLogsResourceType + FlowLogsResourceTypeVpc = "VPC" + // @enum FlowLogsResourceType + FlowLogsResourceTypeSubnet = "Subnet" + // @enum FlowLogsResourceType + FlowLogsResourceTypeNetworkInterface = "NetworkInterface" +) + +const ( + // @enum GatewayType + GatewayTypeIpsec1 = "ipsec.1" +) + +const ( + // @enum HypervisorType + HypervisorTypeOvm = "ovm" + // @enum HypervisorType + HypervisorTypeXen = "xen" +) + +const ( + // @enum ImageAttributeName + ImageAttributeNameDescription = "description" + // @enum ImageAttributeName + ImageAttributeNameKernel = "kernel" + // @enum ImageAttributeName + ImageAttributeNameRamdisk = "ramdisk" + // @enum ImageAttributeName + ImageAttributeNameLaunchPermission = "launchPermission" + // @enum ImageAttributeName + ImageAttributeNameProductCodes = "productCodes" + // @enum ImageAttributeName + ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum ImageAttributeName + ImageAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum ImageState + ImageStatePending = "pending" + // @enum ImageState + ImageStateAvailable = "available" + // @enum ImageState + ImageStateInvalid = "invalid" + // @enum ImageState + ImageStateDeregistered = "deregistered" + // @enum ImageState + ImageStateTransient = 
"transient" + // @enum ImageState + ImageStateFailed = "failed" + // @enum ImageState + ImageStateError = "error" +) + +const ( + // @enum ImageTypeValues + ImageTypeValuesMachine = "machine" + // @enum ImageTypeValues + ImageTypeValuesKernel = "kernel" + // @enum ImageTypeValues + ImageTypeValuesRamdisk = "ramdisk" +) + +const ( + // @enum InstanceAttributeName + InstanceAttributeNameInstanceType = "instanceType" + // @enum InstanceAttributeName + InstanceAttributeNameKernel = "kernel" + // @enum InstanceAttributeName + InstanceAttributeNameRamdisk = "ramdisk" + // @enum InstanceAttributeName + InstanceAttributeNameUserData = "userData" + // @enum InstanceAttributeName + InstanceAttributeNameDisableApiTermination = "disableApiTermination" + // @enum InstanceAttributeName + InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + // @enum InstanceAttributeName + InstanceAttributeNameRootDeviceName = "rootDeviceName" + // @enum InstanceAttributeName + InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum InstanceAttributeName + InstanceAttributeNameProductCodes = "productCodes" + // @enum InstanceAttributeName + InstanceAttributeNameSourceDestCheck = "sourceDestCheck" + // @enum InstanceAttributeName + InstanceAttributeNameGroupSet = "groupSet" + // @enum InstanceAttributeName + InstanceAttributeNameEbsOptimized = "ebsOptimized" + // @enum InstanceAttributeName + InstanceAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum InstanceLifecycleType + InstanceLifecycleTypeSpot = "spot" +) + +const ( + // @enum InstanceStateName + InstanceStateNamePending = "pending" + // @enum InstanceStateName + InstanceStateNameRunning = "running" + // @enum InstanceStateName + InstanceStateNameShuttingDown = "shutting-down" + // @enum InstanceStateName + InstanceStateNameTerminated = "terminated" + // @enum InstanceStateName + InstanceStateNameStopping = "stopping" + // @enum InstanceStateName + InstanceStateNameStopped = "stopped" +) + +const ( + // @enum InstanceType + InstanceTypeT1Micro = "t1.micro" + // @enum InstanceType + InstanceTypeM1Small = "m1.small" + // @enum InstanceType + InstanceTypeM1Medium = "m1.medium" + // @enum InstanceType + InstanceTypeM1Large = "m1.large" + // @enum InstanceType + InstanceTypeM1Xlarge = "m1.xlarge" + // @enum InstanceType + InstanceTypeM3Medium = "m3.medium" + // @enum InstanceType + InstanceTypeM3Large = "m3.large" + // @enum InstanceType + InstanceTypeM3Xlarge = "m3.xlarge" + // @enum InstanceType + InstanceTypeM32xlarge = "m3.2xlarge" + // @enum InstanceType + InstanceTypeM4Large = "m4.large" + // @enum InstanceType + InstanceTypeM4Xlarge = "m4.xlarge" + // @enum InstanceType + InstanceTypeM42xlarge = "m4.2xlarge" + // @enum InstanceType + InstanceTypeM44xlarge = "m4.4xlarge" + // @enum InstanceType + InstanceTypeM410xlarge = "m4.10xlarge" + // @enum InstanceType + InstanceTypeT2Micro = "t2.micro" + // @enum InstanceType + InstanceTypeT2Small = "t2.small" + // @enum InstanceType + InstanceTypeT2Medium = "t2.medium" + // @enum InstanceType + InstanceTypeT2Large = "t2.large" + // @enum InstanceType + InstanceTypeM2Xlarge = "m2.xlarge" + // @enum InstanceType + InstanceTypeM22xlarge = "m2.2xlarge" + // @enum InstanceType + InstanceTypeM24xlarge = "m2.4xlarge" + // @enum InstanceType + InstanceTypeCr18xlarge = "cr1.8xlarge" + // @enum InstanceType + InstanceTypeI2Xlarge = "i2.xlarge" + // @enum InstanceType + InstanceTypeI22xlarge = "i2.2xlarge" + // @enum InstanceType + InstanceTypeI24xlarge 
= "i2.4xlarge" + // @enum InstanceType + InstanceTypeI28xlarge = "i2.8xlarge" + // @enum InstanceType + InstanceTypeHi14xlarge = "hi1.4xlarge" + // @enum InstanceType + InstanceTypeHs18xlarge = "hs1.8xlarge" + // @enum InstanceType + InstanceTypeC1Medium = "c1.medium" + // @enum InstanceType + InstanceTypeC1Xlarge = "c1.xlarge" + // @enum InstanceType + InstanceTypeC3Large = "c3.large" + // @enum InstanceType + InstanceTypeC3Xlarge = "c3.xlarge" + // @enum InstanceType + InstanceTypeC32xlarge = "c3.2xlarge" + // @enum InstanceType + InstanceTypeC34xlarge = "c3.4xlarge" + // @enum InstanceType + InstanceTypeC38xlarge = "c3.8xlarge" + // @enum InstanceType + InstanceTypeC4Large = "c4.large" + // @enum InstanceType + InstanceTypeC4Xlarge = "c4.xlarge" + // @enum InstanceType + InstanceTypeC42xlarge = "c4.2xlarge" + // @enum InstanceType + InstanceTypeC44xlarge = "c4.4xlarge" + // @enum InstanceType + InstanceTypeC48xlarge = "c4.8xlarge" + // @enum InstanceType + InstanceTypeCc14xlarge = "cc1.4xlarge" + // @enum InstanceType + InstanceTypeCc28xlarge = "cc2.8xlarge" + // @enum InstanceType + InstanceTypeG22xlarge = "g2.2xlarge" + // @enum InstanceType + InstanceTypeCg14xlarge = "cg1.4xlarge" + // @enum InstanceType + InstanceTypeR3Large = "r3.large" + // @enum InstanceType + InstanceTypeR3Xlarge = "r3.xlarge" + // @enum InstanceType + InstanceTypeR32xlarge = "r3.2xlarge" + // @enum InstanceType + InstanceTypeR34xlarge = "r3.4xlarge" + // @enum InstanceType + InstanceTypeR38xlarge = "r3.8xlarge" + // @enum InstanceType + InstanceTypeD2Xlarge = "d2.xlarge" + // @enum InstanceType + InstanceTypeD22xlarge = "d2.2xlarge" + // @enum InstanceType + InstanceTypeD24xlarge = "d2.4xlarge" + // @enum InstanceType + InstanceTypeD28xlarge = "d2.8xlarge" +) + +const ( + // @enum ListingState + ListingStateAvailable = "available" + // @enum ListingState + ListingStateSold = "sold" + // @enum ListingState + ListingStateCancelled = "cancelled" + // @enum ListingState + ListingStatePending = "pending" +) + +const ( + // @enum ListingStatus + ListingStatusActive = "active" + // @enum ListingStatus + ListingStatusPending = "pending" + // @enum ListingStatus + ListingStatusCancelled = "cancelled" + // @enum ListingStatus + ListingStatusClosed = "closed" +) + +const ( + // @enum MonitoringState + MonitoringStateDisabled = "disabled" + // @enum MonitoringState + MonitoringStateDisabling = "disabling" + // @enum MonitoringState + MonitoringStateEnabled = "enabled" + // @enum MonitoringState + MonitoringStatePending = "pending" +) + +const ( + // @enum MoveStatus + MoveStatusMovingToVpc = "movingToVpc" + // @enum MoveStatus + MoveStatusRestoringToClassic = "restoringToClassic" +) + +const ( + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeDescription = "description" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeGroupSet = "groupSet" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeAttachment = "attachment" +) + +const ( + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAvailable = "available" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAttaching = "attaching" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusInUse = "in-use" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusDetaching = "detaching" +) + +const ( + // @enum OfferingTypeValues + OfferingTypeValuesHeavyUtilization = "Heavy Utilization" + // @enum OfferingTypeValues + 
OfferingTypeValuesMediumUtilization = "Medium Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesLightUtilization = "Light Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesNoUpfront = "No Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesPartialUpfront = "Partial Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesAllUpfront = "All Upfront" +) + +const ( + // @enum OperationType + OperationTypeAdd = "add" + // @enum OperationType + OperationTypeRemove = "remove" +) + +const ( + // @enum PermissionGroup + PermissionGroupAll = "all" +) + +const ( + // @enum PlacementGroupState + PlacementGroupStatePending = "pending" + // @enum PlacementGroupState + PlacementGroupStateAvailable = "available" + // @enum PlacementGroupState + PlacementGroupStateDeleting = "deleting" + // @enum PlacementGroupState + PlacementGroupStateDeleted = "deleted" +) + +const ( + // @enum PlacementStrategy + PlacementStrategyCluster = "cluster" +) + +const ( + // @enum PlatformValues + PlatformValuesWindows = "Windows" +) + +const ( + // @enum ProductCodeValues + ProductCodeValuesDevpay = "devpay" + // @enum ProductCodeValues + ProductCodeValuesMarketplace = "marketplace" +) + +const ( + // @enum RIProductDescription + RIProductDescriptionLinuxUnix = "Linux/UNIX" + // @enum RIProductDescription + RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)" + // @enum RIProductDescription + RIProductDescriptionWindows = "Windows" + // @enum RIProductDescription + RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" +) + +const ( + // @enum RecurringChargeFrequency + RecurringChargeFrequencyHourly = "Hourly" +) + +const ( + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesUnresponsive = "unresponsive" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceNetwork = "performance-network" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceOther = "performance-other" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesOther = "other" +) + +const ( + // @enum ReportStatusType + ReportStatusTypeOk = "ok" + // @enum ReportStatusType + ReportStatusTypeImpaired = "impaired" +) + +const ( + // @enum ReservedInstanceState + ReservedInstanceStatePaymentPending = "payment-pending" + // @enum ReservedInstanceState + ReservedInstanceStateActive = "active" + // @enum ReservedInstanceState + ReservedInstanceStatePaymentFailed = "payment-failed" + // @enum ReservedInstanceState + ReservedInstanceStateRetired = "retired" +) + +const ( + // @enum ResetImageAttributeName + ResetImageAttributeNameLaunchPermission = "launchPermission" +) + +const ( + // @enum ResourceType + ResourceTypeCustomerGateway = "customer-gateway" + // @enum ResourceType + ResourceTypeDhcpOptions = "dhcp-options" + // @enum ResourceType + ResourceTypeImage = "image" + // @enum ResourceType + ResourceTypeInstance = "instance" + // @enum ResourceType + ResourceTypeInternetGateway = 
"internet-gateway" + // @enum ResourceType + ResourceTypeNetworkAcl = "network-acl" + // @enum ResourceType + ResourceTypeNetworkInterface = "network-interface" + // @enum ResourceType + ResourceTypeReservedInstances = "reserved-instances" + // @enum ResourceType + ResourceTypeRouteTable = "route-table" + // @enum ResourceType + ResourceTypeSnapshot = "snapshot" + // @enum ResourceType + ResourceTypeSpotInstancesRequest = "spot-instances-request" + // @enum ResourceType + ResourceTypeSubnet = "subnet" + // @enum ResourceType + ResourceTypeSecurityGroup = "security-group" + // @enum ResourceType + ResourceTypeVolume = "volume" + // @enum ResourceType + ResourceTypeVpc = "vpc" + // @enum ResourceType + ResourceTypeVpnConnection = "vpn-connection" + // @enum ResourceType + ResourceTypeVpnGateway = "vpn-gateway" +) + +const ( + // @enum RouteOrigin + RouteOriginCreateRouteTable = "CreateRouteTable" + // @enum RouteOrigin + RouteOriginCreateRoute = "CreateRoute" + // @enum RouteOrigin + RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" +) + +const ( + // @enum RouteState + RouteStateActive = "active" + // @enum RouteState + RouteStateBlackhole = "blackhole" +) + +const ( + // @enum RuleAction + RuleActionAllow = "allow" + // @enum RuleAction + RuleActionDeny = "deny" +) + +const ( + // @enum ShutdownBehavior + ShutdownBehaviorStop = "stop" + // @enum ShutdownBehavior + ShutdownBehaviorTerminate = "terminate" +) + +const ( + // @enum SnapshotAttributeName + SnapshotAttributeNameProductCodes = "productCodes" + // @enum SnapshotAttributeName + SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" +) + +const ( + // @enum SnapshotState + SnapshotStatePending = "pending" + // @enum SnapshotState + SnapshotStateCompleted = "completed" + // @enum SnapshotState + SnapshotStateError = "error" +) + +const ( + // @enum SpotInstanceState + SpotInstanceStateOpen = "open" + // @enum SpotInstanceState + SpotInstanceStateActive = "active" + // @enum SpotInstanceState + SpotInstanceStateClosed = "closed" + // @enum SpotInstanceState + SpotInstanceStateCancelled = "cancelled" + // @enum SpotInstanceState + SpotInstanceStateFailed = "failed" +) + +const ( + // @enum SpotInstanceType + SpotInstanceTypeOneTime = "one-time" + // @enum SpotInstanceType + SpotInstanceTypePersistent = "persistent" +) + +const ( + // @enum State + StatePending = "Pending" + // @enum State + StateAvailable = "Available" + // @enum State + StateDeleting = "Deleting" + // @enum State + StateDeleted = "Deleted" +) + +const ( + // @enum Status + StatusMoveInProgress = "MoveInProgress" + // @enum Status + StatusInVpc = "InVpc" + // @enum Status + StatusInClassic = "InClassic" +) + +const ( + // @enum StatusName + StatusNameReachability = "reachability" +) + +const ( + // @enum StatusType + StatusTypePassed = "passed" + // @enum StatusType + StatusTypeFailed = "failed" + // @enum StatusType + StatusTypeInsufficientData = "insufficient-data" + // @enum StatusType + StatusTypeInitializing = "initializing" +) + +const ( + // @enum SubnetState + SubnetStatePending = "pending" + // @enum SubnetState + SubnetStateAvailable = "available" +) + +const ( + // @enum SummaryStatus + SummaryStatusOk = "ok" + // @enum SummaryStatus + SummaryStatusImpaired = "impaired" + // @enum SummaryStatus + SummaryStatusInsufficientData = "insufficient-data" + // @enum SummaryStatus + SummaryStatusNotApplicable = "not-applicable" + // @enum SummaryStatus + SummaryStatusInitializing = "initializing" +) + +const ( + // @enum 
TelemetryStatus + TelemetryStatusUp = "UP" + // @enum TelemetryStatus + TelemetryStatusDown = "DOWN" +) + +const ( + // @enum Tenancy + TenancyDefault = "default" + // @enum Tenancy + TenancyDedicated = "dedicated" +) + +const ( + // @enum TrafficType + TrafficTypeAccept = "ACCEPT" + // @enum TrafficType + TrafficTypeReject = "REJECT" + // @enum TrafficType + TrafficTypeAll = "ALL" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" +) + +const ( + // @enum VolumeAttachmentState + VolumeAttachmentStateAttaching = "attaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateAttached = "attached" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetaching = "detaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetached = "detached" +) + +const ( + // @enum VolumeAttributeName + VolumeAttributeNameAutoEnableIo = "autoEnableIO" + // @enum VolumeAttributeName + VolumeAttributeNameProductCodes = "productCodes" +) + +const ( + // @enum VolumeState + VolumeStateCreating = "creating" + // @enum VolumeState + VolumeStateAvailable = "available" + // @enum VolumeState + VolumeStateInUse = "in-use" + // @enum VolumeState + VolumeStateDeleting = "deleting" + // @enum VolumeState + VolumeStateDeleted = "deleted" + // @enum VolumeState + VolumeStateError = "error" +) + +const ( + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusOk = "ok" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusImpaired = "impaired" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusInsufficientData = "insufficient-data" +) + +const ( + // @enum VolumeStatusName + VolumeStatusNameIoEnabled = "io-enabled" + // @enum VolumeStatusName + VolumeStatusNameIoPerformance = "io-performance" +) + +const ( + // @enum VolumeType + VolumeTypeStandard = "standard" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeGp2 = "gp2" +) + +const ( + // @enum VpcAttributeName + VpcAttributeNameEnableDnsSupport = "enableDnsSupport" + // @enum VpcAttributeName + VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" +) + +const ( + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeActive = "active" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleted = "deleted" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeRejected = "rejected" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeFailed = "failed" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeExpired = "expired" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleting = "deleting" +) + +const ( + // @enum VpcState + VpcStatePending = "pending" + // @enum VpcState + VpcStateAvailable = "available" +) + +const ( + // @enum VpnState + VpnStatePending = "pending" + // @enum VpnState + VpnStateAvailable = "available" + // @enum VpnState + VpnStateDeleting = "deleting" + // @enum VpnState + VpnStateDeleted = "deleted" +) + +const ( + // @enum VpnStaticRouteSource 
+ VpnStaticRouteSourceStatic = "Static" +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go new file mode 100644 index 0000000000000..9e94fe671cecb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -0,0 +1,55 @@ +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func init() { + initRequest = func(r *request.Request) { + if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter + r.Handlers.Build.PushFront(fillPresignedURL) + } + } +} + +func fillPresignedURL(r *request.Request) { + if !r.ParamsFilled() { + return + } + + origParams := r.Params.(*CopySnapshotInput) + + // Stop if PresignedURL/DestinationRegion is set + if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + return + } + + origParams.DestinationRegion = r.Config.Region + newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + // Create a new request based on the existing request. We will use this to + // presign the CopySnapshot request against the source region. + cfg := r.Config.Copy(aws.NewConfig(). + WithEndpoint(""). + WithRegion(aws.StringValue(origParams.SourceRegion))) + + clientInfo := r.ClientInfo + clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion( + clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL)) + + // Presign a CopySnapshot request with modified params + req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. 
+ if err != nil { // bubble error back up to original request + r.Error = err + return + } + + // We have our URL, set it on params + origParams.PresignedUrl = &url +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go new file mode 100644 index 0000000000000..195d9b55b8d5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go @@ -0,0 +1,35 @@ +package ec2_test + +import ( + "io/ioutil" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/stretchr/testify/assert" +) + +func TestCopySnapshotPresignedURL(t *testing.T) { + svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")}) + + assert.NotPanics(t, func() { + // Doesn't panic on nil input + req, _ := svc.CopySnapshotRequest(nil) + req.Sign() + }) + + req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{ + SourceRegion: aws.String("us-west-1"), + SourceSnapshotId: aws.String("snap-id"), + }) + req.Sign() + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + q, _ := url.ParseQuery(string(b)) + u, _ := url.QueryUnescape(q.Get("PresignedUrl")) + assert.Equal(t, "us-west-2", q.Get("DestinationRegion")) + assert.Equal(t, "us-west-1", q.Get("SourceRegion")) + assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, u) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go new file mode 100644 index 0000000000000..5bcf93248ae6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go @@ -0,0 +1,764 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud. +package ec2iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" +) + +// EC2API is the interface type for ec2.EC2. 
+type EC2API interface { + AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput) + + AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error) + + AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput) + + AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error) + + AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput) + + AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error) + + AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput) + + AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error) + + AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput) + + AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error) + + AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput) + + AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error) + + AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput) + + AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error) + + AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput) + + AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error) + + AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput) + + AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error) + + AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) + + AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput) + + AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error) + + AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput) + + AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error) + + AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput) + + AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) + + BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput) + + BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error) + + CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput) + + CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error) + + CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput) + + CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error) + + CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput) + + 
CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error) + + CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput) + + CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error) + + CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput) + + CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error) + + CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput) + + CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error) + + CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput) + + CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error) + + ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput) + + ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error) + + CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput) + + CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error) + + CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput) + + CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error) + + CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput) + + CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error) + + CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput) + + CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error) + + CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput) + + CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error) + + CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput) + + CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error) + + CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput) + + CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error) + + CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput) + + CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error) + + CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput) + + CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error) + + CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput) + + CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error) + + CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput) + + CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error) + + CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput) + + CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) 
(*ec2.CreateNetworkInterfaceOutput, error) + + CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput) + + CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error) + + CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput) + + CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error) + + CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput) + + CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) + + CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput) + + CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error) + + CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput) + + CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) + + CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot) + + CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error) + + CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput) + + CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error) + + CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput) + + CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error) + + CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput) + + CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) + + CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume) + + CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error) + + CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput) + + CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error) + + CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput) + + CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error) + + CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput) + + CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error) + + CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput) + + CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error) + + CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput) + + CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error) + + CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput) + + CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error) + + DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput) + + DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error) + + DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput) + + 
DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error) + + DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput) + + DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error) + + DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput) + + DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error) + + DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput) + + DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error) + + DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput) + + DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error) + + DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput) + + DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error) + + DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput) + + DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error) + + DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput) + + DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error) + + DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput) + + DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) + + DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput) + + DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error) + + DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput) + + DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) + + DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput) + + DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) + + DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput) + + DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error) + + DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput) + + DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error) + + DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput) + + DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error) + + DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput) + + DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) + + DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput) + + DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error) + + DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput) + + DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error) + + DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput) + + DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) 
(*ec2.DeleteVpcPeeringConnectionOutput, error) + + DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput) + + DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error) + + DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput) + + DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error) + + DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput) + + DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error) + + DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput) + + DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error) + + DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error) + + DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput) + + DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error) + + DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput) + + DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error) + + DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput) + + DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error) + + DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput) + + DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error) + + DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput) + + DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error) + + DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput) + + DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error) + + DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput) + + DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error) + + DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput) + + DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error) + + DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput) + + DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error) + + DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput) + + DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error) + + DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput) + + DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) + + 
DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput) + + DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error) + + DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput) + + DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error) + + DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput) + + DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error) + + DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput) + + DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error) + + DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error + + DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput) + + DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) + + DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error + + DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) + + DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) + + DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput) + + DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) + + DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) + + DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error) + + DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput) + + DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error) + + DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput) + + DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error) + + DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput) + + DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error) + + DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput) + + DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error) + + DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput) + + DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error) + + DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput) + + DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) + + DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput) + + 
DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error) + + DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput) + + DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error) + + DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput) + + DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error) + + DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error + + DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput) + + DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error) + + DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error + + DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput) + + DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error) + + DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput) + + DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error) + + DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput) + + DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error) + + DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput) + + DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error + + DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput) + + DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error) + + DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput) + + DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error) + + DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput) + + DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error) + + DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput) + + DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error) + + DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput) + + 
DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error) + + DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput) + + DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error) + + DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error + + DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput) + + DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) + + DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput) + + DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) + + DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error + + DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput) + + DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error) + + DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput) + + DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error) + + DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error + + DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput) + + DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) + + DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error + + DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput) + + DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) + + DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput) + + DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error) + + DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput) + + DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error) + + DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput) + + DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error) + + DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput) + + DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error) + + DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput) + + DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) + + DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput) + + DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error) + + DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput) + + DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, 
error) + + DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput) + + DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error) + + DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput) + + DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error) + + DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput) + + DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error) + + DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) + + DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput) + + DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error) + + DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput) + + DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error) + + DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput) + + DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error) + + DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput) + + DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error) + + DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput) + + DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error) + + EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput) + + EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error) + + EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput) + + EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error) + + EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput) + + EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error) + + GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput) + + GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error) + + GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput) + + GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error) + + ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput) + + ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error) + + ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput) + + ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error) + + ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput) + + ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) + + ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput) + + 
ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error) + + ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput) + + ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error) + + ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput) + + ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error) + + ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput) + + ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) + + ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput) + + ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) + + ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput) + + ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error) + + ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput) + + ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error) + + ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput) + + ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error) + + ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput) + + ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error) + + ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput) + + ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error) + + ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput) + + ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error) + + ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput) + + ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error) + + MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput) + + MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error) + + MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput) + + MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error) + + PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput) + + PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + + RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput) + + RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error) + + RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput) + + RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error) + + 
RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput) + + RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error) + + ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput) + + ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error) + + ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput) + + ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error) + + ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput) + + ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error) + + ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput) + + ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error) + + ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput) + + ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error) + + ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput) + + ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error) + + RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput) + + RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error) + + RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput) + + RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error) + + ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput) + + ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error) + + ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput) + + ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error) + + ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput) + + ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + + ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput) + + ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error) + + RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput) + + RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error) + + RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput) + + RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error) + + RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput) + + 
RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) + + RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation) + + RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) + + StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput) + + StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error) + + StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput) + + StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error) + + TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput) + + TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) + + UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput) + + UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error) + + UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput) + + UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) +} + +var _ EC2API = (*ec2.EC2)(nil) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go new file mode 100644 index 0000000000000..86557fc15d02a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go @@ -0,0 +1,5188 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEC2_AcceptVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.AcceptVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.AcceptVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateAddressInput{ + Domain: aws.String("DomainType"), + DryRun: aws.Bool(true), + } + resp, err := svc.AllocateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + AllowReassignment: aws.Bool(true), + PrivateIpAddresses: []*string{ + aws.String("String"), // Required + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.AssignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AssociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateAddressInput{ + AllocationId: aws.String("String"), + AllowReassociation: aws.Bool(true), + DryRun: aws.Bool(true), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PublicIp: aws.String("String"), + } + resp, err := svc.AssociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateRouteTableInput{ + RouteTableId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.AttachClassicLinkVpcInput{ + Groups: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AttachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVolumeInput{ + Device: aws.String("String"), // Required + InstanceId: aws.String("String"), // Required + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... 
+ }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_BundleInstance() { + svc := ec2.New(session.New()) + + params := &ec2.BundleInstanceInput{ + InstanceId: aws.String("String"), // Required + Storage: &ec2.Storage{ // Required + S3: &ec2.S3Storage{ + AWSAccessKeyId: aws.String("String"), + Bucket: aws.String("String"), + Prefix: aws.String("String"), + UploadPolicy: []byte("PAYLOAD"), + UploadPolicySignature: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + } + resp, err := svc.BundleInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelBundleTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelBundleTaskInput{ + BundleId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelBundleTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelConversionTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelConversionTaskInput{ + ConversionTaskId: aws.String("String"), // Required + DryRun: aws.Bool(true), + ReasonMessage: aws.String("String"), + } + resp, err := svc.CancelConversionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelExportTaskInput{ + ExportTaskId: aws.String("String"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelImportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelImportTaskInput{ + CancelReason: aws.String("String"), + DryRun: aws.Bool(true), + ImportTaskId: aws.String("String"), + } + resp, err := svc.CancelImportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CancelReservedInstancesListingInput{ + ReservedInstancesListingId: aws.String("String"), // Required + } + resp, err := svc.CancelReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CancelSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotFleetRequestsInput{ + SpotFleetRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TerminateInstances: aws.Bool(true), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ConfirmProductInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ConfirmProductInstanceInput{ + InstanceId: aws.String("String"), // Required + ProductCode: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ConfirmProductInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopyImage() { + svc := ec2.New(session.New()) + + params := &ec2.CopyImageInput{ + Name: aws.String("String"), // Required + SourceImageId: aws.String("String"), // Required + SourceRegion: aws.String("String"), // Required + ClientToken: aws.String("String"), + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CopyImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopySnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CopySnapshotInput{ + SourceRegion: aws.String("String"), // Required + SourceSnapshotId: aws.String("String"), // Required + Description: aws.String("String"), + DestinationRegion: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + PresignedUrl: aws.String("String"), + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateCustomerGatewayInput{ + BgpAsn: aws.Int64(1), // Required + PublicIp: aws.String("String"), // Required + Type: aws.String("GatewayType"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.CreateDhcpOptionsInput{ + DhcpConfigurations: []*ec2.NewDhcpConfiguration{ // Required + { // Required + Key: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.CreateFlowLogsInput{ + DeliverLogsPermissionArn: aws.String("String"), // Required + LogGroupName: aws.String("String"), // Required + ResourceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ResourceType: aws.String("FlowLogsResourceType"), // Required + TrafficType: aws.String("TrafficType"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateImage() { + svc := ec2.New(session.New()) + + params := &ec2.CreateImageInput{ + InstanceId: aws.String("String"), // Required + Name: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + NoReboot: aws.Bool(true), + } + resp, err := svc.CreateImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInstanceExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInstanceExportTaskInput{ + InstanceId: aws.String("String"), // Required + Description: aws.String("String"), + ExportToS3Task: &ec2.ExportToS3TaskSpecification{ + ContainerFormat: aws.String("ContainerFormat"), + DiskImageFormat: aws.String("DiskImageFormat"), + S3Bucket: aws.String("String"), + S3Prefix: aws.String("String"), + }, + TargetEnvironment: aws.String("ExportEnvironment"), + } + resp, err := svc.CreateInstanceExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInternetGatewayInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.CreateInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.CreateKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.CreateNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkInterfaceInput{ + SubnetId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.CreateNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreatePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreatePlacementGroupInput{ + GroupName: aws.String("String"), // Required + Strategy: aws.String("PlacementStrategy"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreatePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CreateReservedInstancesListingInput{ + ClientToken: aws.String("String"), // Required + InstanceCount: aws.Int64(1), // Required + PriceSchedules: []*ec2.PriceScheduleSpecification{ // Required + { // Required + CurrencyCode: aws.String("CurrencyCodeValues"), + Price: aws.Float64(1.0), + Term: aws.Int64(1), + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), // Required + } + resp, err := svc.CreateReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.CreateRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteTableInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSecurityGroupInput{ + Description: aws.String("String"), // Required + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSnapshotInput{ + VolumeId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSpotDatafeedSubscriptionInput{ + Bucket: aws.String("String"), // Required + DryRun: aws.Bool(true), + Prefix: aws.String("String"), + } + resp, err := svc.CreateSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSubnetInput{ + CidrBlock: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateTags() { + svc := ec2.New(session.New()) + + params := &ec2.CreateTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*ec2.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVolume() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + Size: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeType: aws.String("VolumeType"), + } + resp, err := svc.CreateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpc() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcInput{ + CidrBlock: aws.String("String"), // Required + DryRun: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + } + resp, err := svc.CreateVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcEndpointInput{ + ServiceName: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + PeerOwnerId: aws.String("String"), + PeerVpcId: aws.String("String"), + VpcId: aws.String("String"), + } + resp, err := svc.CreateVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionInput{ + CustomerGatewayId: aws.String("String"), // Required + Type: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Options: &ec2.VpnConnectionOptionsSpecification{ + StaticRoutesOnly: aws.Bool(true), + }, + } + resp, err := svc.CreateVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.CreateVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnGatewayInput{ + Type: aws.String("GatewayType"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteCustomerGatewayInput{ + CustomerGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteFlowLogsInput{ + FlowLogIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclInput{ + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclEntryInput{ + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeletePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeletePlacementGroupInput{ + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeletePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteTableInput{ + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSecurityGroupInput{ + DryRun: aws.Bool(true), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + } + resp, err := svc.DeleteSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSubnetInput{ + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteTags() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Tags: []*ec2.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVolumeInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcEndpointsInput{ + VpcEndpointIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionInput{ + VpnConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.DeleteVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeregisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.DeregisterImageInput{ + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeregisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAccountAttributes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAccountAttributesInput{ + AttributeNames: []*string{ + aws.String("AccountAttributeName"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAddressesInput{ + AllocationIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAvailabilityZones() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAvailabilityZonesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ZoneNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAvailabilityZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeBundleTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeBundleTasksInput{ + BundleIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeBundleTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeClassicLinkInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeClassicLinkInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeClassicLinkInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeConversionTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeConversionTasksInput{ + ConversionTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeConversionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeCustomerGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeCustomerGatewaysInput{ + CustomerGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeCustomerGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeExportTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeExportTasksInput{ + ExportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeFlowLogsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + FlowLogIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImageAttributeInput{ + Attribute: aws.String("ImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImages() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImagesInput{ + DryRun: aws.Bool(true), + ExecutableUsers: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + ImageIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Owners: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportImageTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportImageTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportImageTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportSnapshotTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportSnapshotTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportSnapshotTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeAllInstances: aws.Bool(true), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInternetGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInternetGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InternetGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeInternetGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeKeyPairs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeKeyPairsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + KeyNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeKeyPairs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeMovingAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeMovingAddressesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeMovingAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkAcls() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkAclsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkAclIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkAcls(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attribute: aws.String("NetworkInterfaceAttribute"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaces() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfacesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkInterfaceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePlacementGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePlacementGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePlacementGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePrefixLists() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePrefixListsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PrefixListIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePrefixLists(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRegions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRegionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RegionNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRegions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OfferingType: aws.String("OfferingTypeValues"), + ReservedInstancesIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesListings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesListingsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), + ReservedInstancesListingId: aws.String("String"), + } + resp, err := svc.DescribeReservedInstancesListings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesModifications() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesModificationsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NextToken: aws.String("String"), + ReservedInstancesModificationIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesModifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesOfferings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesOfferingsInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeMarketplace: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + InstanceType: aws.String("InstanceType"), + MaxDuration: aws.Int64(1), + MaxInstanceCount: aws.Int64(1), + MaxResults: aws.Int64(1), + MinDuration: aws.Int64(1), + NextToken: aws.String("String"), + OfferingType: aws.String("OfferingTypeValues"), + ProductDescription: aws.String("RIProductDescription"), + ReservedInstancesOfferingIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeRouteTables() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRouteTablesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRouteTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSecurityGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSecurityGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshots() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + OwnerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + RestorableByUserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetInstancesInput{ + SpotFleetRequestId: aws.String("String"), // Required + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequestHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestHistoryInput{ + SpotFleetRequestId: aws.String("String"), // Required + StartTime: aws.Time(time.Now()), // Required + DryRun: aws.Bool(true), + EventType: aws.String("EventType"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetRequestHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestsInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SpotFleetRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotInstanceRequestsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SpotInstanceRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotPriceHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotPriceHistoryInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceTypes: []*string{ + aws.String("InstanceType"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ProductDescriptions: []*string{ + aws.String("String"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeSpotPriceHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSubnets() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSubnetsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeTags() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeTagsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + Attribute: aws.String("VolumeAttributeName"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumeStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcAttributeInput{ + VpcId: aws.String("String"), // Required + Attribute: aws.String("VpcAttributeName"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpointServices() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointServicesInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeVpcEndpointServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VpcEndpointIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcPeeringConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcPeeringConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcPeeringConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcPeeringConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DetachClassicLinkVpcInput{ + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.DetachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DetachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVolumeInput{ + VolumeId: aws.String("String"), // Required + Device: aws.String("String"), + DryRun: aws.Bool(true), + Force: aws.Bool(true), + InstanceId: aws.String("String"), + } + resp, err := svc.DetachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.DisableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateAddressInput{ + AssociationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.DisassociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateRouteTableInput{ + AssociationId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisassociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.EnableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_EnableVolumeIO() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVolumeIOInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVolumeIO(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetConsoleOutput() { + svc := ec2.New(session.New()) + + params := &ec2.GetConsoleOutputInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetConsoleOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetPasswordData() { + svc := ec2.New(session.New()) + + params := &ec2.GetPasswordDataInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetPasswordData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportImage() { + svc := ec2.New(session.New()) + + params := &ec2.ImportImageInput{ + Architecture: aws.String("String"), + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainers: []*ec2.ImageDiskContainer{ + { // Required + Description: aws.String("String"), + DeviceName: aws.String("String"), + Format: aws.String("String"), + SnapshotId: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + Hypervisor: aws.String("String"), + LicenseType: aws.String("String"), + Platform: aws.String("String"), + RoleName: aws.String("String"), + } + resp, err := svc.ImportImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ImportInstanceInput{ + Platform: aws.String("PlatformValues"), // Required + Description: aws.String("String"), + DiskImages: []*ec2.DiskImage{ + { // Required + Description: aws.String("String"), + Image: &ec2.DiskImageDetail{ + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ + Size: aws.Int64(1), // Required + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + LaunchSpecification: &ec2.ImportInstanceLaunchSpecification{ + AdditionalInfo: aws.String("String"), + Architecture: aws.String("ArchitectureValues"), + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + Monitoring: aws.Bool(true), + Placement: &ec2.Placement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + SubnetId: aws.String("String"), + UserData: &ec2.UserData{ + Data: aws.String("String"), + }, + }, + } + resp, err := svc.ImportInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.ImportKeyPairInput{ + KeyName: aws.String("String"), // Required + PublicKeyMaterial: []byte("PAYLOAD"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ImportKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.ImportSnapshotInput{ + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainer: &ec2.SnapshotDiskContainer{ + Description: aws.String("String"), + Format: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + RoleName: aws.String("String"), + } + resp, err := svc.ImportSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportVolume() { + svc := ec2.New(session.New()) + + params := &ec2.ImportVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + Image: &ec2.DiskImageDetail{ // Required + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ // Required + Size: aws.Int64(1), // Required + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.ImportVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyImageAttributeInput{ + ImageId: aws.String("String"), // Required + Attribute: aws.String("String"), + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + OperationType: aws.String("OperationType"), + ProductCodes: []*string{ + aws.String("String"), // Required + // More values... + }, + UserGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String("String"), // Required + Attribute: aws.String("InstanceAttributeName"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsInstanceBlockDeviceSpecification{ + DeleteOnTermination: aws.Bool(true), + VolumeId: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + DisableApiTermination: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + EbsOptimized: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + Groups: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + InstanceType: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Kernel: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Ramdisk: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SriovNetSupport: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + UserData: &ec2.BlobAttributeValue{ + Value: []byte("PAYLOAD"), + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attachment: &ec2.NetworkInterfaceAttachmentChanges{ + AttachmentId: aws.String("String"), + DeleteOnTermination: aws.Bool(true), + }, + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyReservedInstancesInput{ + ReservedInstancesIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TargetConfigurations: []*ec2.ReservedInstancesConfiguration{ // Required + { // Required + AvailabilityZone: aws.String("String"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("InstanceType"), + Platform: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + } + resp, err := svc.ModifyReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String("String"), // Required + Attribute: aws.String("SnapshotAttributeName"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Add: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + DryRun: aws.Bool(true), + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + OperationType: aws.String("OperationType"), + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifySnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySpotFleetRequest() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String("String"), // Required + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TargetCapacity: aws.Int64(1), + } + resp, err := svc.ModifySpotFleetRequest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySubnetAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String("String"), // Required + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifySubnetAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + AutoEnableIO: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.ModifyVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcAttributeInput{ + VpcId: aws.String("String"), // Required + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String("String"), // Required + AddRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RemoveRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ResetPolicy: aws.Bool(true), + } + resp, err := svc.ModifyVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.MonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.MonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_MoveAddressToVpc() { + svc := ec2.New(session.New()) + + params := &ec2.MoveAddressToVpcInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.MoveAddressToVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_PurchaseReservedInstancesOffering() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseReservedInstancesOfferingInput{ + InstanceCount: aws.Int64(1), // Required + ReservedInstancesOfferingId: aws.String("String"), // Required + DryRun: aws.Bool(true), + LimitPrice: &ec2.ReservedInstanceLimitPrice{ + Amount: aws.Float64(1.0), + CurrencyCode: aws.String("CurrencyCodeValues"), + }, + } + resp, err := svc.PurchaseReservedInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RebootInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RebootInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RebootInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RegisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.RegisterImageInput{ + Name: aws.String("String"), // Required + Architecture: aws.String("ArchitectureValues"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + ImageLocation: aws.String("String"), + KernelId: aws.String("String"), + RamdiskId: aws.String("String"), + RootDeviceName: aws.String("String"), + SriovNetSupport: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.RegisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RejectVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.RejectVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RejectVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReleaseAddress() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseAddressInput{ + AllocationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.ReleaseAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: aws.String("String"), // Required + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceNetworkAclAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.ReplaceNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRoute() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.ReplaceRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRouteTableAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceRouteTableAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReportInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.ReportInstanceStatusInput{ + Instances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ReasonCodes: []*string{ // Required + aws.String("ReportInstanceReasonCodes"), // Required + // More values... 
+ }, + Status: aws.String("ReportStatusType"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ReportInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RequestSpotFleet() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotFleetInput{ + SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{ // Required + IamFleetRole: aws.String("String"), // Required + LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{ // Required + { // Required + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.SpotFleetMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroups: []*ec2.GroupIdentifier{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + }, + // More values... + }, + SpotPrice: aws.String("String"), + SubnetId: aws.String("String"), + UserData: aws.String("String"), + WeightedCapacity: aws.Float64(1.0), + }, + // More values... + }, + SpotPrice: aws.String("String"), // Required + TargetCapacity: aws.Int64(1), // Required + AllocationStrategy: aws.String("AllocationStrategy"), + ClientToken: aws.String("String"), + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TerminateInstancesWithExpiration: aws.Bool(true), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RequestSpotFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RequestSpotInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotInstancesInput{ + SpotPrice: aws.String("String"), // Required + AvailabilityZoneGroup: aws.String("String"), + BlockDurationMinutes: aws.Int64(1), + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + LaunchGroup: aws.String("String"), + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + Type: aws.String("SpotInstanceType"), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + } + resp, err := svc.RequestSpotInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetImageAttributeInput{ + Attribute: aws.String("ResetImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ResetInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + SourceDestCheck: aws.String("String"), + } + resp, err := svc.ResetNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RestoreAddressToClassic() { + svc := ec2.New(session.New()) + + params := &ec2.RestoreAddressToClassicInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RestoreAddressToClassic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunInstancesInput{ + ImageId: aws.String("String"), // Required + MaxCount: aws.Int64(1), // Required + MinCount: aws.Int64(1), // Required + AdditionalInfo: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + DisableApiTermination: aws.Bool(true), + DryRun: aws.Bool(true), + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... 
+ }, + Placement: &ec2.Placement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + } + resp, err := svc.RunInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StartInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StartInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + AdditionalInfo: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.StartInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StopInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StopInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.StopInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_TerminateInstances() { + svc := ec2.New(session.New()) + + params := &ec2.TerminateInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.TerminateInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnassignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + PrivateIpAddresses: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.UnassignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnmonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.UnmonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.UnmonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go new file mode 100644 index 0000000000000..2ff4220f7dac0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity +// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your +// need to invest in hardware up front, so you can develop and deploy applications +// faster. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type EC2 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ec2" + +// New creates a new instance of the EC2 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a EC2 client from just a session. +// svc := ec2.New(mySession) +// +// // Create a EC2 client with additional configuration +// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 { + svc := &EC2{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBack(ec2query.Build) + svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal) + svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta) + svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EC2 operation and runs any +// custom request initialization. +func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go new file mode 100644 index 0000000000000..1b28317a18b7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -0,0 +1,761 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package ec2 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeBundleTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "BundleTasks[].State", + Expected: "complete", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "BundleTasks[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "completed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelling", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCustomerGateways", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CustomerGateways[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: 
[]waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeImages", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Images[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Images[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "shutting-down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].InstanceStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: 
"Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeKeyPairs", + Delay: 5, + MaxAttempts: 6, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "length(KeyPairs[].KeyName) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidKeyPairNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeNetworkInterfaces", + Delay: 20, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "NetworkInterfaces[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "error", + Argument: "", + Expected: "InvalidNetworkInterfaceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { + waiterCfg := waiter.Config{ + Operation: "GetPasswordData", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(PasswordData) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSnapshots", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Snapshots[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSpotInstanceRequests", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "fulfilled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "schedule-expired", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "canceled-before-fulfillment", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "bad-parameters", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "system-error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { + waiterCfg := waiter.Config{ + Operation: 
"DescribeSubnets", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Subnets[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].SystemStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "InvalidVolumeNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "in-use", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Vpcs[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: 
"DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "pending", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore new file mode 100644 index 0000000000000..7adca9439c551 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000000000..37ec93a14fdcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/Godeps/_workspace/src/github.com/go-ini/ini/README.md new file mode 100644 index 0000000000000..1272038a9e364 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README.md @@ -0,0 +1,560 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + + go get gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. 
+ +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split value into slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. 
+ +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. 
+ +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 0000000000000..45e19edddff3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,547 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + + go get gopkg.in/ini.v1 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off 
+v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? 
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +自动分割键值为切片(slice): + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! 
+Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? 
+ +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go new file mode 100644 index 0000000000000..1fee789a1d61f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go @@ -0,0 +1,1226 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + + _VERSION = "1.6.0" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. 
+ nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. 
+func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string devide by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 devide by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int devide by given delimiter. +func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 devide by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint devide by given delimiter. 
+func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 64) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 devide by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + k.value = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. 
+func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + return f, f.Reload() +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. 
+func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func cutComment(str string) string { + i := strings.Index(str, "#") + if i == -1 { + return str + } + return str[:i] +} + +func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { + isEnd := false + for { + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", err + } + isEnd = true + } + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + break + } + val += next + if isEnd { + return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { + isEnd := false + for { + valLen := len(val) + if valLen == 0 || val[valLen-1] != '\\' { + break + } + val = val[:valLen-1] + + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", isEnd, err + } + isEnd = true + } + + next = strings.TrimSpace(next) + if len(next) == 0 { + break + } + val += next + } + return val, isEnd, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) error { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. + // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 + comments := "" + isEnd := false + + section, err := f.NewSection(DEFAULT_SECTION) + if err != nil { + return err + } + + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + length := len(line) + + // Check error and ignore io.EOF just for a moment. + if err != nil { + if err != io.EOF { + return fmt.Errorf("error reading next line: %v", err) + } + // The last line of file could be an empty line. + if length == 0 { + break + } + isEnd = true + } + + // Skip empty lines. 
+ if length == 0 { + continue + } + + switch { + case line[0] == '#' || line[0] == ';': // Comments. + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[length-1] == ']': // New sction. + section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) + if err != nil { + return err + } + + if len(comments) > 0 { + section.Comment = comments + comments = "" + } + // Reset counter. + count = 1 + continue + } + + // Other possibilities. + var ( + i int + keyQuote string + kname string + valQuote string + val string + ) + + // Key name surrounded by quotes. + if line[0] == '"' { + if length > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if len(keyQuote) > 0 { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return fmt.Errorf("error parsing line: missing closing key quote: %s", line) + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == pos { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + i = i + pos + kname = line[qLen:pos] // Just keep spaces inside quotes. + } else { + i = strings.IndexAny(line, "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == 0 { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + kname = strings.TrimSpace(line[0:i]) + } + + isAutoIncr := false + // Auto increment. + if kname == "-" { + isAutoIncr = true + kname = "#" + fmt.Sprint(count) + count++ + } + + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if firstChar == `"` { + if lineRightLength >= 3 && lineRight[0:3] == `"""` { + valQuote = `"""` + } else { + valQuote = `"` + } + } else if firstChar == `'` { + valQuote = `'` + } + + if len(valQuote) > 0 { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + // For multiple-line value check. + if pos == -1 { + if valQuote == `"` || valQuote == `'` { + return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) + } + + val = lineRight[qLen:] + "\n" + val, err = checkMultipleLines(buf, line, val, valQuote) + if err != nil { + return err + } + } else { + val = lineRight[qLen : pos+qLen] + } + } else { + val = strings.TrimSpace(cutComment(lineRight)) + val, isEnd, err = checkContinuationLines(buf, val) + if err != nil { + return err + } + } + + k, err := section.NewKey(kname, val) + if err != nil { + return err + } + k.isAutoIncr = isAutoIncr + if len(comments) > 0 { + k.Comment = comments + comments = "" + } + + if isEnd { + break + } + } + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. 
+func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty. + if len(sec.keyList) == 0 { + continue + } + } + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.Contains(kname, "`") || strings.Contains(kname, `"`): + kname = `"""` + kname + `"""` + case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + kname = "`" + kname + "`" + } + + val := key.value + // In case key value contains "\n", "`" or "\"". + if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || + strings.Contains(val, "#") { + val = `"""` + val + `"""` + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. 
+func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini_test.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini_test.go new file mode 100644 index 0000000000000..82ff36dd2365c --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini_test.go @@ -0,0 +1,512 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon # Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = """"2+3=5"""" +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +""""6+6"""" = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + _, err = Load("testdata/404.ini") + So(err, ShouldNotBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with empty section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`key = "value`)) + So(err, ShouldNotBeNil) + }) + }) +} + +func Test_Values(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + 
So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 
1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + for i, v := range []float64{1.1, 2.2, 3.3} { + So(vals1[i], ShouldEqual, v) + } + + vals2 := sec.Key("INTS").Ints(",") + for i, v := range []int{1, 2, 3} { + So(vals2[i], ShouldEqual, v) + } + + vals3 := sec.Key("INTS").Int64s(",") + for i, v := range []int64{1, 2, 3} { + So(vals3[i], ShouldEqual, v) + } + + vals4 := sec.Key("UINTS").Uints(",") + for i, v := range []uint{1, 2, 3} { + So(vals4[i], ShouldEqual, v) + } + + vals5 := sec.Key("UINTS").Uint64s(",") + for i, v := range []uint64{1, 2, 3} { + So(vals5[i], ShouldEqual, v) + } + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + for i, v := range []time.Time{t, t, t} { + So(vals6[i].String(), ShouldEqual, v.String()) + } + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), 
ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_SaveTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + }) +} + +func Benchmark_Key_Value(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000000000..c118437101039 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go @@ -0,0 +1,350 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = 
setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. 
+func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct_test.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct_test.go new file mode 100644 index 0000000000000..d865ad78eb7d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct_test.go @@ -0,0 +1,239 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Note = Hello world! 
+ +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + cfg.SaveTo("testdata/conf_reflect.ini") + + Convey("Reflect from non-point struct", func() { + 
So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/testdata/conf.ini b/Godeps/_workspace/src/github.com/go-ini/ini/testdata/conf.ini new file mode 100644 index 0000000000000..2ed0ac1d3ac2d --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/testdata/conf.ini @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 0000000000000..531fcc11c70e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 0000000000000..1f98077570d83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000000000..b03310a91fde0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 0000000000000..ad17bf0012e8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." 
+ @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 0000000000000..187ef676dc9c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000000000..67df3fc1c8788 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,12 @@ +package jmespath + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000000000..1cd2d239c969d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json new file mode 100644 index 0000000000000..d550e969547c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json @@ -0,0 +1,96 @@ +[{ + "given": + {"foo": {"bar": {"baz": "correct"}}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": {"baz": "correct"}} + }, + { + "expression": "foo.bar", + "result": {"baz": "correct"} + }, + { + "expression": "foo.bar.baz", + "result": "correct" + }, + { + "expression": "foo\n.\nbar\n.baz", + "result": "correct" + }, + { + "expression": "foo.bar.baz.bad", + "result": null 
+ }, + { + "expression": "foo.bar.bad", + "result": null + }, + { + "expression": "foo.bad", + "result": null + }, + { + "expression": "bad", + "result": null + }, + { + "expression": "bad.morebad.morebad", + "result": null + } + ] +}, +{ + "given": + {"foo": {"bar": ["one", "two", "three"]}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": ["one", "two", "three"]} + }, + { + "expression": "foo.bar", + "result": ["one", "two", "three"] + } + ] +}, +{ + "given": ["one", "two", "three"], + "cases": [ + { + "expression": "one", + "result": null + }, + { + "expression": "two", + "result": null + }, + { + "expression": "three", + "result": null + }, + { + "expression": "one.two", + "result": null + } + ] +}, +{ + "given": + {"foo": {"1": ["one", "two", "three"], "-1": "bar"}}, + "cases": [ + { + "expression": "foo.\"1\"", + "result": ["one", "two", "three"] + }, + { + "expression": "foo.\"1\"[0]", + "result": "one" + }, + { + "expression": "foo.\"-1\"", + "result": "bar" + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json new file mode 100644 index 0000000000000..e3fa196b14ff6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json @@ -0,0 +1,257 @@ +[ + { + "given": { + "outer": { + "foo": "foo", + "bar": "bar", + "baz": "baz" + } + }, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] + }, + { + "given": { + "outer": { + "foo": "foo", + "bool": false, + "empty_list": [], + "empty_string": "" + } + }, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] + }, + { + "given": { + "True": true, + "False": false, + "Number": 5, + "EmptyList": [], + "Zero": 0 + }, + "cases": [ + { + "expression": "True && False", + "result": false + }, + { + "expression": "False && True", + "result": false + }, + { + "expression": "True && True", + "result": true + }, + { + "expression": "False && False", + "result": false + }, + { + "expression": "True && Number", + "result": 5 + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "Number && False", + "result": false + }, + { + "expression": "Number && EmptyList", + "result": [] + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "EmptyList && True", + "result": [] + }, + { + "expression": "EmptyList && False", + "result": [] + }, + { + "expression": "True || False", + "result": true + }, + { + "expression": "True || True", + "result": true + }, + { + "expression": "False || True", + "result": true + }, + { + "expression": "False || False", + "result": false + }, + { + "expression": "Number || EmptyList", + 
"result": 5 + }, + { + "expression": "Number || True", + "result": 5 + }, + { + "expression": "Number || True && False", + "result": 5 + }, + { + "expression": "(Number || True) && False", + "result": false + }, + { + "expression": "Number || (True && False)", + "result": 5 + }, + { + "expression": "!True", + "result": false + }, + { + "expression": "!False", + "result": true + }, + { + "expression": "!Number", + "result": false + }, + { + "expression": "!EmptyList", + "result": true + }, + { + "expression": "True && !False", + "result": true + }, + { + "expression": "True && !EmptyList", + "result": true + }, + { + "expression": "!False && !EmptyList", + "result": true + }, + { + "expression": "!(True && False)", + "result": true + }, + { + "expression": "!Zero", + "result": false + }, + { + "expression": "!!Zero", + "result": true + } + ] + }, + { + "given": { + "one": 1, + "two": 2, + "three": 3 + }, + "cases": [ + { + "expression": "one < two", + "result": true + }, + { + "expression": "one <= two", + "result": true + }, + { + "expression": "one == one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + "expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json new file mode 100644 index 0000000000000..0c26248d0792b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json new file mode 100644 index 0000000000000..4a62d951a6571 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json new file mode 100644 index 0000000000000..5b9f52b1159a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + "result": [[], []] + } + ] + }, + { + "given": {"foo": [{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + "result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + "result": [{"top": {"first": "foo", "last": "bar"}}] + } + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + 
"expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": 
[[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + { + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + {"key": null}, {"notkey": true} + ] + }, + { + "comment": "Equality with null RHS", 
+ "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json new file mode 100644 index 0000000000000..8b8db363a2b60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": "invalid-type" + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + "expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + 
"expression": "floor(decimals[0])", + "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + "result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": "merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { + "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { 
+ "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": "reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + "expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + "expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": [1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": "sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 
4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, 
+ { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": "invalid-type" + }, + { + "expression": "map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json new file mode 100644 index 0000000000000..7998a41ac9deb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + "cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { + "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true 
+ }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": [ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { + "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, + "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": "\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + 
"expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + "\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + }, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { + "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + "given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": "\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + 
"result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + "expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + }, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + "result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } + ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json new file mode 100644 index 0000000000000..aa03b35dd7f17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": "foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": 
"[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], [3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", "four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + 
"result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + "expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json new file mode 100644 index 0000000000000..c6706b97196d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + "result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + "comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . 
identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'𝄞'", + "result": "𝄞" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + "expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json new file mode 100644 index 0000000000000..8f2a481ed7cfe --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json @@ -0,0 +1,393 @@ +[{ + "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + "result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: 
bar}", + "result": {"bar": 1} + }, + { + "expression": "{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + "result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", "second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] 
+ }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json new file mode 100644 index 0000000000000..2127cf441bad5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json new file mode 100644 index 0000000000000..b10c0a496d653 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + 
"expression": "foo.*.baz | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [2]", + "result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + "baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json new file mode 100644 index 0000000000000..359477278c848 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + 
"expression": "foo[10:0:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + "expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + "expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + "expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json new file mode 100644 index 0000000000000..003c294588711 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": "syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + "expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + 
"given": {"type": "object"}, + "cases": [ + { + "expression": "*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", + "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + 
"comment": "Missing value and closing character", + "expression": "a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": "syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + "expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { + "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? 
bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": "syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": "foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json new file mode 100644 index 0000000000000..6b07b0b6dae39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": 
{"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true}, + "cases": [ + { + "expression": "\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json new file mode 100644 index 0000000000000..3bcec30281549 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + } + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + "cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + "cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": 
"foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", + "result": [] + } + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + "result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + "expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + 
"result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, + "number": 23, + "array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance_test.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance_test.go new file mode 100644 index 0000000000000..4ee9c959dc6c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance_test.go @@ -0,0 +1,123 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestSuite struct { + Given interface{} + TestCases []TestCase `json:"cases"` + Comment string +} +type TestCase struct { + Comment string + Expression string + Result interface{} + Error string +} + +var whiteListed = []string{ + "compliance/basic.json", + "compliance/current.json", + "compliance/escape.json", + "compliance/filters.json", + "compliance/functions.json", + "compliance/identifiers.json", + "compliance/indices.json", + "compliance/literal.json", + "compliance/multiselect.json", + "compliance/ormatch.json", + "compliance/pipe.json", + "compliance/slice.json", + "compliance/syntax.json", + "compliance/unicode.json", + "compliance/wildcard.json", + "compliance/boolean.json", +} + +func allowed(path string) bool { + for _, el := range whiteListed { + if el == path { + return true + } + } + return false +} + +func TestCompliance(t *testing.T) { + assert := assert.New(t) + + var complianceFiles []string + err := filepath.Walk("compliance", func(path string, _ os.FileInfo, _ error) error { + //if strings.HasSuffix(path, ".json") { + if allowed(path) { + complianceFiles = append(complianceFiles, path) + } + return nil + }) + if assert.Nil(err) { + for _, filename := range complianceFiles { + runComplianceTest(assert, filename) + } + } +} + +func runComplianceTest(assert *assert.Assertions, filename string) { + var testSuites []TestSuite + data, err := ioutil.ReadFile(filename) + if assert.Nil(err) { + err := json.Unmarshal(data, &testSuites) + if assert.Nil(err) { + for _, testsuite := range testSuites { + runTestSuite(assert, testsuite, filename) + } + } + } +} + +func runTestSuite(assert *assert.Assertions, testsuite TestSuite, filename string) { + for _, testcase := range testsuite.TestCases { + if testcase.Error != "" { + // This is a test case that verifies we error out properly. + runSyntaxTestCase(assert, testsuite.Given, testcase, filename) + } else { + runTestCase(assert, testsuite.Given, testcase, filename) + } + } +} + +func runSyntaxTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + // Anything with an .Error means that we expect that JMESPath should return + // an error when we try to evaluate the expression. 
+ _, err := Search(testcase.Expression, given) + assert.NotNil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) +} + +func runTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + lexer := NewLexer() + var err error + _, err = lexer.tokenize(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not lex expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + parser := NewParser() + _, err = parser.Parse(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not parse expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + actual, err := Search(testcase.Expression, given) + if assert.Nil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) { + assert.Equal(testcase.Result, actual, fmt.Sprintf("Expression: %s", testcase.Expression)) + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000000000..8a3f2ef0dce88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,840 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": functionEntry{ + name: "length", + arguments: []argSpec{ + argSpec{types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": functionEntry{ + name: "starts_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": functionEntry{ + name: "abs", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": functionEntry{ + name: "avg", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": functionEntry{ + name: "ceil", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": functionEntry{ + name: "contains", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": functionEntry{ + name: "ends_with", + arguments: []argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": functionEntry{ + name: "floor", + arguments: []argSpec{ + argSpec{types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": functionEntry{ + name: "amp", + arguments: []argSpec{ + argSpec{types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": functionEntry{ + name: "max", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": functionEntry{ + name: "merge", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": functionEntry{ + name: "max_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": functionEntry{ + name: "sum", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": functionEntry{ + name: "min", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": functionEntry{ + name: "min_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": functionEntry{ + name: "type", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": functionEntry{ + name: "keys", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": functionEntry{ + name: "values", + arguments: []argSpec{ + argSpec{types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": functionEntry{ + name: "sort", + arguments: []argSpec{ + argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": functionEntry{ + name: "sort_by", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": functionEntry{ + name: "join", + arguments: 
[]argSpec{ + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": functionEntry{ + name: "reverse", + arguments: []argSpec{ + argSpec{types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": functionEntry{ + name: "to_array", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": functionEntry{ + name: "to_string", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": functionEntry{ + name: "to_number", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": functionEntry{ + name: "not_null", + arguments: []argSpec{ + argSpec{types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if _, ok := arg.([]interface{}); ok { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if c, ok := arg.([]interface{}); ok { + return float64(len(c)), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-1 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10 new file mode 100644 index 0000000000000..4d5f9756e551b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-10 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100 new file mode 100644 index 0000000000000..bc4f6a3f49c82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-100 @@ -0,0 +1 @@ +ends_with(str, 'SStr') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101 new file mode 100644 index 0000000000000..81bf07a7a1bee --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-101 @@ -0,0 +1 @@ +ends_with(str, 'foo') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102 new file mode 100644 index 0000000000000..3225de9139958 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-102 @@ 
-0,0 +1 @@ +floor(`1.2`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103 new file mode 100644 index 0000000000000..8cac959582274 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-103 @@ -0,0 +1 @@ +floor(decimals[0]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104 new file mode 100644 index 0000000000000..bd76f47e2165e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-104 @@ -0,0 +1 @@ +floor(foo) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105 new file mode 100644 index 0000000000000..c719add3deb08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-105 @@ -0,0 +1 @@ +length('abc') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106 new file mode 100644 index 0000000000000..ff12f04f14874 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-106 @@ -0,0 +1 @@ +length('') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107 new file mode 100644 index 0000000000000..0eccba1d3a0ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-107 @@ -0,0 +1 @@ +length(@) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108 new file mode 100644 index 0000000000000..ab14b0fa8e6ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-108 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109 new file mode 100644 index 0000000000000..f1514bb7438c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-109 @@ -0,0 +1 @@ +length(str) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110 new file mode 100644 index 0000000000000..09276059a23a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-110 @@ -0,0 +1 @@ +length(array) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112 new file mode 100644 index 0000000000000..ab14b0fa8e6ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-112 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115 new file mode 100644 index 0000000000000..bfb41ae98757c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-115 @@ -0,0 +1 @@ +max(strings) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118 new file mode 100644 index 0000000000000..915ec172ae782 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-118 @@ -0,0 +1 @@ +merge(`{}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119 new file mode 100644 index 0000000000000..5b74e9b593f5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-119 @@ -0,0 +1 @@ +merge(`{}`, `{}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12 new file mode 100644 index 0000000000000..64c5e5885a4b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-12 @@ -0,0 +1 @@ +two \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120 new file mode 100644 index 0000000000000..f34dcd8fade11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-120 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"b": 2}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121 new file mode 100644 index 0000000000000..e335dc96feaf8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-121 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"a": 2}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122 new file mode 100644 index 0000000000000..aac28fffeb3eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-122 @@ -0,0 +1 @@ +merge(`{"a": 1, "b": 2}`, `{"a": 2, "c": 3}`, `{"d": 4}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123 new file mode 100644 index 0000000000000..1c6fd67198e89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-123 @@ -0,0 +1 @@ +min(numbers) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126 new file mode 100644 index 0000000000000..93e68db775981 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-126 @@ -0,0 +1 @@ +min(decimals) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128 new file mode 100644 index 0000000000000..554601ea4270c 
--- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-128 @@ -0,0 +1 @@ +type('abc') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129 new file mode 100644 index 0000000000000..1ab2d9834f800 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-129 @@ -0,0 +1 @@ +type(`1.0`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13 new file mode 100644 index 0000000000000..1d19714ffbc27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-13 @@ -0,0 +1 @@ +three \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130 new file mode 100644 index 0000000000000..3cee2f56f1579 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-130 @@ -0,0 +1 @@ +type(`2`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131 new file mode 100644 index 0000000000000..4821f9aefcf1e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-131 @@ -0,0 +1 @@ +type(`true`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132 new file mode 100644 index 0000000000000..40b6913a6c0eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-132 @@ -0,0 +1 @@ +type(`false`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133 new file mode 100644 index 0000000000000..c711252be28de --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133 @@ -0,0 +1 @@ +type(`null`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134 new file mode 100644 index 0000000000000..ec5d07e95cc7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-134 @@ -0,0 +1 @@ +type(`[0]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135 new file mode 100644 index 0000000000000..2080401e1eae1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-135 @@ -0,0 +1 @@ +type(`{"a": "b"}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136 new file mode 100644 index 0000000000000..c5ee2ba5cb8ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-136 @@ -0,0 +1 @@ +type(@) \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137 new file mode 100644 index 0000000000000..1814ca17b875e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-137 @@ -0,0 +1 @@ +keys(objects) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138 new file mode 100644 index 0000000000000..e03cdb0d640e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-138 @@ -0,0 +1 @@ +values(objects) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139 new file mode 100644 index 0000000000000..7fea8d2ce5c00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-139 @@ -0,0 +1 @@ +keys(empty_hash) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14 new file mode 100644 index 0000000000000..a17c92f597470 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-14 @@ -0,0 +1 @@ +one.two \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140 new file mode 100644 index 0000000000000..4f1d882a406a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-140 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141 new file mode 100644 index 0000000000000..4f1d882a406a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-141 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142 new file mode 100644 index 0000000000000..19ec1fe090088 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-142 @@ -0,0 +1 @@ +join(',', `["a", "b"]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143 new file mode 100644 index 0000000000000..761c68a6b5959 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-143 @@ -0,0 +1 @@ +join('|', strings) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144 new file mode 100644 index 0000000000000..a0dd68eaa908b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-144 @@ -0,0 +1 @@ +join('|', decimals[].to_string(@)) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145 
new file mode 100644 index 0000000000000..a4190b2ba25e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-145 @@ -0,0 +1 @@ +join('|', empty_list) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146 new file mode 100644 index 0000000000000..f5033c30223a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-146 @@ -0,0 +1 @@ +reverse(numbers) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147 new file mode 100644 index 0000000000000..822f054d50c7a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-147 @@ -0,0 +1 @@ +reverse(array) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148 new file mode 100644 index 0000000000000..a584adcc0c5a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-148 @@ -0,0 +1 @@ +reverse(`[]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149 new file mode 100644 index 0000000000000..fb4cc5dc482b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-149 @@ -0,0 +1 @@ +reverse('') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15 new file mode 100644 index 0000000000000..693f954962710 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-15 @@ -0,0 +1 @@ +foo."1" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150 new file mode 100644 index 0000000000000..aa260fabc175b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-150 @@ -0,0 +1 @@ +reverse('hello world') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151 new file mode 100644 index 0000000000000..d8c58826afda6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-151 @@ -0,0 +1 @@ +starts_with(str, 'S') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152 new file mode 100644 index 0000000000000..32e16b7bb002d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-152 @@ -0,0 +1 @@ +starts_with(str, 'St') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153 new file mode 100644 index 0000000000000..5f575ae7fc4c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-153 @@ -0,0 +1 @@ 
+starts_with(str, 'Str') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155 new file mode 100644 index 0000000000000..f31551c62fd63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-155 @@ -0,0 +1 @@ +sum(numbers) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156 new file mode 100644 index 0000000000000..18b90446cc918 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-156 @@ -0,0 +1 @@ +sum(decimals) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157 new file mode 100644 index 0000000000000..def4d0bc19c69 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-157 @@ -0,0 +1 @@ +sum(array[].to_number(@)) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158 new file mode 100644 index 0000000000000..48e4a7707c740 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-158 @@ -0,0 +1 @@ +sum(`[]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159 new file mode 100644 index 0000000000000..9fb939a0b1b4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-159 @@ -0,0 +1 @@ +to_array('foo') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16 new file mode 100644 index 0000000000000..86155ed75462a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-16 @@ -0,0 +1 @@ +foo."1"[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160 new file mode 100644 index 0000000000000..74ba7cc67dbe4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-160 @@ -0,0 +1 @@ +to_array(`0`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161 new file mode 100644 index 0000000000000..57f8b983f08f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-161 @@ -0,0 +1 @@ +to_array(objects) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162 new file mode 100644 index 0000000000000..d17c7345faf07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-162 @@ -0,0 +1 @@ +to_array(`[1, 2, 3]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163 new file mode 100644 index 0000000000000..15f70f7839944 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-163 @@ -0,0 +1 @@ +to_array(false) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164 new file mode 100644 index 0000000000000..9b227529b409d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-164 @@ -0,0 +1 @@ +to_string('foo') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165 new file mode 100644 index 0000000000000..489a42935a68b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-165 @@ -0,0 +1 @@ +to_string(`1.2`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166 new file mode 100644 index 0000000000000..d17106a00f50a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-166 @@ -0,0 +1 @@ +to_string(`[0, 1]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167 new file mode 100644 index 0000000000000..4f4ae9e689007 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-167 @@ -0,0 +1 @@ +to_number('1.0') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168 new file mode 100644 index 0000000000000..ce932e2e6ac5a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-168 @@ -0,0 +1 @@ +to_number('1.1') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169 new file mode 100644 index 0000000000000..e246fa4dbcbd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-169 @@ -0,0 +1 @@ +to_number('4') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17 new file mode 100644 index 0000000000000..de0b4c39d99fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-17 @@ -0,0 +1 @@ +foo."-1" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170 new file mode 100644 index 0000000000000..f8c264747e945 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-170 @@ -0,0 +1 @@ +to_number('notanumber') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171 new file mode 100644 index 0000000000000..7d423b1cd19c8 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-171 @@ -0,0 +1 @@ +to_number(`false`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172 new file mode 100644 index 0000000000000..503716b687c6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-172 @@ -0,0 +1 @@ +to_number(`null`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173 new file mode 100644 index 0000000000000..7f61dfa157b78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-173 @@ -0,0 +1 @@ +to_number(`[0]`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174 new file mode 100644 index 0000000000000..ee72a8c01ac5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-174 @@ -0,0 +1 @@ +to_number(`{"foo": 0}`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175 new file mode 100644 index 0000000000000..8d8f1f7590641 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175 @@ -0,0 +1 @@ +sort(numbers) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178 new file mode 100644 index 0000000000000..8cb54ba47b3f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-178 @@ -0,0 +1 @@ +sort(empty_list) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 new file mode 100644 index 0000000000000..cf2c9b1db2ed7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 @@ -0,0 +1 @@ +not_null(unknown_key, str) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18 new file mode 100644 index 0000000000000..b516b2c489f1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-18 @@ -0,0 +1 @@ +@ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180 new file mode 100644 index 0000000000000..e047d48668225 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-180 @@ -0,0 +1 @@ +not_null(unknown_key, foo.bar, empty_list, str) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181 new file mode 100644 index 0000000000000..c4cc87b9c1289 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-181 @@ -0,0 +1 @@ +not_null(unknown_key, null_key, empty_list, str) 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182 new file mode 100644 index 0000000000000..2c7fa0a9c1ecd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-182 @@ -0,0 +1 @@ +not_null(all, expressions, are_null) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183 new file mode 100644 index 0000000000000..eb096e61cdd1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-183 @@ -0,0 +1 @@ +numbers[].to_string(@) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184 new file mode 100644 index 0000000000000..4958abaec45b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-184 @@ -0,0 +1 @@ +array[].to_number(@) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185 new file mode 100644 index 0000000000000..1027084724dfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-185 @@ -0,0 +1 @@ +foo[].not_null(f, e, d, c, b, a) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186 new file mode 100644 index 0000000000000..83cb91612827a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-186 @@ -0,0 +1 @@ +sort_by(people, &age) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187 new file mode 100644 index 0000000000000..a494d6c4bf11f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-187 @@ -0,0 +1 @@ +sort_by(people, &to_number(age_str)) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188 new file mode 100644 index 0000000000000..2294fc54d126f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-188 @@ -0,0 +1 @@ +sort_by(people, &age)[].name \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189 new file mode 100644 index 0000000000000..bb8c2b46d1f56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-189 @@ -0,0 +1 @@ +sort_by(people, &age)[].extra \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19 new file mode 100644 index 0000000000000..e3ed49ac65315 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-19 @@ -0,0 +1 @@ +@.bar \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190 new file mode 100644 index 0000000000000..3ab0290340ae2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-190 @@ -0,0 +1 @@ +sort_by(`[]`, &age) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191 new file mode 100644 index 0000000000000..97db56f7b67ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-191 @@ -0,0 +1 @@ +max_by(people, &age) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192 new file mode 100644 index 0000000000000..a7e648de96325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-192 @@ -0,0 +1 @@ +max_by(people, &age_str) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193 new file mode 100644 index 0000000000000..be4348d0c1e78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-193 @@ -0,0 +1 @@ +max_by(people, &to_number(age_str)) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194 new file mode 100644 index 0000000000000..a707283d4997a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-194 @@ -0,0 +1 @@ +min_by(people, &age) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195 new file mode 100644 index 0000000000000..2cd6618d84ba0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-195 @@ -0,0 +1 @@ +min_by(people, &age_str) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196 new file mode 100644 index 0000000000000..833e6837382f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-196 @@ -0,0 +1 @@ +min_by(people, &to_number(age_str)) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198 new file mode 100644 index 0000000000000..706dbda89a815 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-198 @@ -0,0 +1 @@ +__L \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199 new file mode 100644 index 0000000000000..ca593ca93c16d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-199 @@ -0,0 +1 @@ +"!\r" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2 new file mode 100644 index 0000000000000..4d5f9756e551b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-2 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20 new file mode 100644 index 0000000000000..f300ab91782dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-20 @@ -0,0 +1 @@ +@.foo[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200 new file mode 100644 index 0000000000000..9c93843541dfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-200 @@ -0,0 +1 @@ +Y_1623 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201 new file mode 100644 index 0000000000000..c1b0730e01334 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-201 @@ -0,0 +1 @@ +x \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202 new file mode 100644 index 0000000000000..1552ec63a6fb9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-202 @@ -0,0 +1 @@ +"\tF\uCebb" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203 new file mode 100644 index 0000000000000..047041273f085 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-203 @@ -0,0 +1 @@ +" \t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204 new file mode 100644 index 0000000000000..efd782cc3250c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-204 @@ -0,0 +1 @@ +" " \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205 new file mode 100644 index 0000000000000..8494ac2706471 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-205 @@ -0,0 +1 @@ +v2 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206 new file mode 100644 index 0000000000000..c61f7f7ebd80e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-206 @@ -0,0 +1 @@ +"\t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207 new file mode 100644 index 0000000000000..f6055f1898fee --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-207 @@ -0,0 +1 @@ +_X \ No newline at end of file 
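The files added above and below are seed entries for go-jmespath's fuzz corpus: each file holds a single JMESPath expression (field access, filters, slices, and built-ins such as `sum`, `to_number`, `sort_by`, `not_null`) used as input for the library's fuzz testing. As a rough illustration only, and not part of this changeset, the sketch below shows how corpus-style expressions are evaluated with the vendored library's `jmespath.Search` function; the sample JSON document is invented for the example, only the expressions mirror the corpus entries.

```go
// Illustrative sketch (not part of this diff): evaluating corpus-style
// JMESPath expressions with the vendored go-jmespath library.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Invented sample data; field names echo identifiers used in the corpus.
	var data interface{}
	doc := `{"numbers": [3, 1, 2],
	         "people": [{"name": "b", "age": 30}, {"name": "a", "age": 25}]}`
	if err := json.Unmarshal([]byte(doc), &data); err != nil {
		panic(err)
	}

	// jmespath.Search parses and evaluates an expression against the document.
	if v, err := jmespath.Search("sum(numbers)", data); err == nil {
		fmt.Println(v) // 6
	}
	if v, err := jmespath.Search("sort_by(people, &age)[].name", data); err == nil {
		fmt.Println(v) // [a b]
	}
}
```
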
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208 new file mode 100644 index 0000000000000..4f58e0e7bd24e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-208 @@ -0,0 +1 @@ +"\t4\ud9da\udd15" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209 new file mode 100644 index 0000000000000..f536bfbf6870d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-209 @@ -0,0 +1 @@ +v24_W \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21 new file mode 100644 index 0000000000000..ef47ff2c056e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-21 @@ -0,0 +1 @@ +"foo.bar" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210 new file mode 100644 index 0000000000000..69759281cb448 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-210 @@ -0,0 +1 @@ +"H" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211 new file mode 100644 index 0000000000000..c3e8b5927fb1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-211 @@ -0,0 +1 @@ +"\f" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212 new file mode 100644 index 0000000000000..24ecc222cbfae --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-212 @@ -0,0 +1 @@ +"E4" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213 new file mode 100644 index 0000000000000..5693009d2e4b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-213 @@ -0,0 +1 @@ +"!" 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214 new file mode 100644 index 0000000000000..62dd220e71979 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-214 @@ -0,0 +1 @@ +tM \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215 new file mode 100644 index 0000000000000..3c1e81f55ae4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-215 @@ -0,0 +1 @@ +" [" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216 new file mode 100644 index 0000000000000..493daa673c4fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-216 @@ -0,0 +1 @@ +"R!" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217 new file mode 100644 index 0000000000000..116b50ab38b47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-217 @@ -0,0 +1 @@ +_6W \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218 new file mode 100644 index 0000000000000..0073fac4520da --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-218 @@ -0,0 +1 @@ +"\uaBA1\r" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219 new file mode 100644 index 0000000000000..00d8fa37eebec --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-219 @@ -0,0 +1 @@ +tL7 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22 new file mode 100644 index 0000000000000..661ebcfa3373f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-22 @@ -0,0 +1 @@ +"foo bar" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220 new file mode 100644 index 0000000000000..c14f16e0264e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-220 @@ -0,0 +1 @@ +"<" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257 new file mode 100644 index 0000000000000..8a2443e6e900e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-257 @@ -0,0 +1 @@ +hvu \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258 new file mode 100644 index 0000000000000..c9ddacbb61f7e --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-258 @@ -0,0 +1 @@ +"; !" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259 new file mode 100644 index 0000000000000..d0209c6df0aab --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-259 @@ -0,0 +1 @@ +hU \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26 new file mode 100644 index 0000000000000..82649bd24eef1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-26 @@ -0,0 +1 @@ +"/unix/path" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260 new file mode 100644 index 0000000000000..c07242aa44bc9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-260 @@ -0,0 +1 @@ +"!I\n\/" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261 new file mode 100644 index 0000000000000..7aae4effc7cd5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-261 @@ -0,0 +1 @@ +"\uEEbF" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262 new file mode 100644 index 0000000000000..c1574f35ffab5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-262 @@ -0,0 +1 @@ +"U)\t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263 new file mode 100644 index 0000000000000..5197e3a2bf56a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-263 @@ -0,0 +1 @@ +fa0_9 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264 new file mode 100644 index 0000000000000..320558b005369 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-264 @@ -0,0 +1 @@ +"/" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265 new file mode 100644 index 0000000000000..4a2cb0865277b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-265 @@ -0,0 +1 @@ +Gy \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266 new file mode 100644 index 0000000000000..9524c83813c0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-266 @@ -0,0 +1 @@ +"\b" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267 new file mode 100644 index 0000000000000..066b8d98b78fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-267 @@ -0,0 +1 @@ +"<" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268 new file mode 100644 index 0000000000000..c61f7f7ebd80e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-268 @@ -0,0 +1 @@ +"\t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269 new file mode 100644 index 0000000000000..a582f62d282d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-269 @@ -0,0 +1 @@ +"\t&\\\r" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27 new file mode 100644 index 0000000000000..a1d50731c78b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27 @@ -0,0 +1 @@ +"\"\"\"" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270 new file mode 100644 index 0000000000000..e3c5eedeb591e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-270 @@ -0,0 +1 @@ +"#" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271 new file mode 100644 index 0000000000000..e75309a5241a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-271 @@ -0,0 +1 @@ +B__ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272 new file mode 100644 index 0000000000000..027177272c5ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-272 @@ -0,0 +1 @@ +"\nS \n" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273 new file mode 100644 index 0000000000000..99432276ec43b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-273 @@ -0,0 +1 @@ +Bp \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274 new file mode 100644 index 0000000000000..d4f8a788b80fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-274 @@ -0,0 +1 @@ +",\t;" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275 new file mode 100644 index 0000000000000..56c384f753056 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-275 @@ -0,0 +1 @@ +B_q \ No newline at end of 
file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276 new file mode 100644 index 0000000000000..f093d2aa3bceb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-276 @@ -0,0 +1 @@ +"\/+\t\n\b!Z" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277 new file mode 100644 index 0000000000000..11e1229d9e320 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-277 @@ -0,0 +1 @@ +"󇟇\\ueFAc" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278 new file mode 100644 index 0000000000000..90dbfcfcd35fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-278 @@ -0,0 +1 @@ +":\f" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279 new file mode 100644 index 0000000000000..b06b830252499 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-279 @@ -0,0 +1 @@ +"\/" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28 new file mode 100644 index 0000000000000..5f55d73af661c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-28 @@ -0,0 +1 @@ +"bar"."baz" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280 new file mode 100644 index 0000000000000..0e4bf7c113b28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-280 @@ -0,0 +1 @@ +_BW_6Hg_Gl \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281 new file mode 100644 index 0000000000000..81bb45f80594a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-281 @@ -0,0 +1 @@ +"􃰂" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282 new file mode 100644 index 0000000000000..d0b4de1464263 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-282 @@ -0,0 +1 @@ +zs1DC \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 new file mode 100644 index 0000000000000..68797580caa50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 @@ -0,0 +1 @@ +__434 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284 new file mode 100644 index 0000000000000..e61be91c4af62 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-284 @@ -0,0 +1 @@ +"󵅁" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285 new file mode 100644 index 0000000000000..026cb9cbb5163 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-285 @@ -0,0 +1 @@ +Z_5 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286 new file mode 100644 index 0000000000000..ca9587d06c0e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-286 @@ -0,0 +1 @@ +z_M_ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287 new file mode 100644 index 0000000000000..67f6d9c42a3c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-287 @@ -0,0 +1 @@ +YU_2 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288 new file mode 100644 index 0000000000000..927ab653a7341 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-288 @@ -0,0 +1 @@ +_0 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289 new file mode 100644 index 0000000000000..39307ab938e9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-289 @@ -0,0 +1 @@ +"\b+" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29 new file mode 100644 index 0000000000000..8b0c5b41bd9df --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-29 @@ -0,0 +1 @@ +foo[?name == 'a'] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290 new file mode 100644 index 0000000000000..a3ec2ed7a1227 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-290 @@ -0,0 +1 @@ +"\"" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291 new file mode 100644 index 0000000000000..26bf7e122de50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-291 @@ -0,0 +1 @@ +D7 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292 new file mode 100644 index 0000000000000..d595c9f43a91e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-292 @@ -0,0 +1 @@ +_62L \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293 new file mode 100644 index 0000000000000..f6869694967bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-293 @@ -0,0 +1 @@ +"\tK\t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294 new file mode 100644 index 0000000000000..f3a9b7edb50b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-294 @@ -0,0 +1 @@ +"\n\\\f" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295 new file mode 100644 index 0000000000000..455f00ffc29b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-295 @@ -0,0 +1 @@ +I_ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296 new file mode 100644 index 0000000000000..ccd5968f9c1e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-296 @@ -0,0 +1 @@ +W_a0_ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297 new file mode 100644 index 0000000000000..ee55c16fc818e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-297 @@ -0,0 +1 @@ +BQ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298 new file mode 100644 index 0000000000000..0d1a169a67a7a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-298 @@ -0,0 +1 @@ +"\tX$\uABBb" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299 new file mode 100644 index 0000000000000..0573cfd73fdf1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-299 @@ -0,0 +1 @@ +Z9 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3 new file mode 100644 index 0000000000000..f0fcbd8eaf41b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-3 @@ -0,0 +1 @@ +foo.bar.baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30 new file mode 100644 index 0000000000000..4f8e6a17aab51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-30 @@ -0,0 +1 @@ +*[?[0] == `0`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300 new file mode 100644 index 0000000000000..a0db02bebf1ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-300 @@ -0,0 +1 @@ +"\b%\"򞄏" \ No 
newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301 new file mode 100644 index 0000000000000..56032f7a248ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-301 @@ -0,0 +1 @@ +_F \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302 new file mode 100644 index 0000000000000..4a8a3cff369eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-302 @@ -0,0 +1 @@ +"!," \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303 new file mode 100644 index 0000000000000..7c1efac0004ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-303 @@ -0,0 +1 @@ +"\"!" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304 new file mode 100644 index 0000000000000..a0f489d532f00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-304 @@ -0,0 +1 @@ +Hh \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305 new file mode 100644 index 0000000000000..c64e8d5ac8bfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-305 @@ -0,0 +1 @@ +"&" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306 new file mode 100644 index 0000000000000..0567e992f143b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-306 @@ -0,0 +1 @@ +"9\r\\R" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307 new file mode 100644 index 0000000000000..ce8245c5b9773 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-307 @@ -0,0 +1 @@ +M_k \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308 new file mode 100644 index 0000000000000..8f16a5ac048c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-308 @@ -0,0 +1 @@ +"!\b\n󑩒\"\"" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309 new file mode 100644 index 0000000000000..504ff5ae39f3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-309 @@ -0,0 +1 @@ +"6" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31 new file mode 100644 index 0000000000000..07fb57234b8cd --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-31 @@ -0,0 +1 @@ +foo[?first == last] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310 new file mode 100644 index 0000000000000..533dd8e5460f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-310 @@ -0,0 +1 @@ +_7 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311 new file mode 100644 index 0000000000000..1e4a3a34155da --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-311 @@ -0,0 +1 @@ +"0" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312 new file mode 100644 index 0000000000000..37961f6ca424d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-312 @@ -0,0 +1 @@ +"\\8\\" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313 new file mode 100644 index 0000000000000..23480cff14c95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-313 @@ -0,0 +1 @@ +b7eo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314 new file mode 100644 index 0000000000000..e609f81a3b8a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-314 @@ -0,0 +1 @@ +xIUo9 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315 new file mode 100644 index 0000000000000..d89a25f0b97c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-315 @@ -0,0 +1 @@ +"5" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316 new file mode 100644 index 0000000000000..5adcf5e7dc59f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-316 @@ -0,0 +1 @@ +"?" 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317 new file mode 100644 index 0000000000000..ace4a897d333a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-317 @@ -0,0 +1 @@ +sU \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 new file mode 100644 index 0000000000000..feffb7061b348 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 @@ -0,0 +1 @@ +"VH2&H\\\/" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319 new file mode 100644 index 0000000000000..8223f1e51e3be --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-319 @@ -0,0 +1 @@ +_C \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32 new file mode 100644 index 0000000000000..7e85c4bdfe977 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-32 @@ -0,0 +1 @@ +foo[?first == last].first \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320 new file mode 100644 index 0000000000000..c9cdc63b07017 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-320 @@ -0,0 +1 @@ +_ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321 new file mode 100644 index 0000000000000..c82f7982ee0fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-321 @@ -0,0 +1 @@ +"<\t" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322 new file mode 100644 index 0000000000000..dae65c515546b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-322 @@ -0,0 +1 @@ +"\uD834\uDD1E" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323 new file mode 100644 index 0000000000000..b6b3695438aaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-323 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324 new file mode 100644 index 0000000000000..bf06e678c1518 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-324 @@ -0,0 +1 @@ +foo.bar[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325 new file mode 100644 index 
0000000000000..5d48e0205ce42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-325 @@ -0,0 +1 @@ +foo.bar[2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326 new file mode 100644 index 0000000000000..de3af7230f98c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-326 @@ -0,0 +1 @@ +foo.bar[3] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327 new file mode 100644 index 0000000000000..a1c33350841ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-327 @@ -0,0 +1 @@ +foo.bar[-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328 new file mode 100644 index 0000000000000..ad0fef91cfa32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-328 @@ -0,0 +1 @@ +foo.bar[-2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329 new file mode 100644 index 0000000000000..3e83c6f730630 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-329 @@ -0,0 +1 @@ +foo.bar[-3] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33 new file mode 100644 index 0000000000000..72fc0a53e79d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-33 @@ -0,0 +1 @@ +foo[?age > `25`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330 new file mode 100644 index 0000000000000..433a737d65eaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-330 @@ -0,0 +1 @@ +foo.bar[-4] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331 new file mode 100644 index 0000000000000..4d5f9756e551b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-331 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332 new file mode 100644 index 0000000000000..5e0d9b7177291 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-332 @@ -0,0 +1 @@ +foo[0].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333 new file mode 100644 index 0000000000000..3cd7e9460fe57 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-333 @@ -0,0 +1 @@ +foo[1].bar \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 new file mode 100644 index 0000000000000..74cb176555823 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-334 @@ -0,0 +1 @@ +foo[2].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 new file mode 100644 index 0000000000000..3cf2007f70d9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-335 @@ -0,0 +1 @@ +foo[3].notbar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 new file mode 100644 index 0000000000000..9674d88037531 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-336 @@ -0,0 +1 @@ +foo[3].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 new file mode 100644 index 0000000000000..9b0b2f8189b81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-337 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 new file mode 100644 index 0000000000000..83c639a185b25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-338 @@ -0,0 +1 @@ +foo[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 new file mode 100644 index 0000000000000..3b76c9f64a415 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-339 @@ -0,0 +1 @@ +foo[2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 new file mode 100644 index 0000000000000..9a2b0184edf10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-34 @@ -0,0 +1 @@ +foo[?age >= `25`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 new file mode 100644 index 0000000000000..ff99e045d2eea --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-340 @@ -0,0 +1 @@ +foo[3] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 new file mode 100644 index 0000000000000..040ecb240c85a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-341 @@ -0,0 +1 @@ +foo[4] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 new file mode 100644 index 0000000000000..6e7ea636eec7d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-342 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 new file mode 100644 index 0000000000000..bace2a0be1726 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-343 @@ -0,0 +1 @@ +[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 new file mode 100644 index 0000000000000..5d50c80c06834 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-344 @@ -0,0 +1 @@ +[2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 new file mode 100644 index 0000000000000..99d21a2a0f097 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-345 @@ -0,0 +1 @@ +[-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 new file mode 100644 index 0000000000000..133a9c6272f9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-346 @@ -0,0 +1 @@ +[-2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 new file mode 100644 index 0000000000000..b7f78c5dc5140 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-347 @@ -0,0 +1 @@ +[-3] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 new file mode 100644 index 0000000000000..bd9de815f4563 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-348 @@ -0,0 +1 @@ +reservations[].instances[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 new file mode 100644 index 0000000000000..55e6257352f13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-349 @@ -0,0 +1 @@ +reservations[].instances[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 new file mode 100644 index 0000000000000..fa83f1da3b72e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-35 @@ -0,0 +1 @@ +foo[?age > `30`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 new file mode 100644 index 0000000000000..1661747c04dda --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-350 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 new file mode 100644 index 0000000000000..1661747c04dda --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-351 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 new file mode 100644 index 0000000000000..3debc70f895f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-352 @@ -0,0 +1 @@ +reservations[].instances[].foo[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 new file mode 100644 index 0000000000000..75af2fda0691c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-353 @@ -0,0 +1 @@ +reservations[].instances[].foo[].baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 new file mode 100644 index 0000000000000..4a70cd8a03c67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-354 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 new file mode 100644 index 0000000000000..987985b002fcb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-355 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].notbar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 new file mode 100644 index 0000000000000..1661747c04dda --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-356 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 new file mode 100644 index 0000000000000..634f937e555f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-357 @@ -0,0 +1 @@ +reservations[].instances[].foo[].notbar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 new file mode 100644 index 0000000000000..09cb7b8bb1bfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-358 @@ -0,0 +1 @@ +reservations[].instances[].bar[].baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 new file mode 100644 index 0000000000000..f5d9ac5b7633d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-359 @@ -0,0 +1 @@ +reservations[].instances[].baz[].baz \ No newline at end of 
file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 new file mode 100644 index 0000000000000..463a2a542db53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-36 @@ -0,0 +1 @@ +foo[?age < `25`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 new file mode 100644 index 0000000000000..d1016d6e75bd2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-360 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 new file mode 100644 index 0000000000000..ef54cf52d6774 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-361 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 new file mode 100644 index 0000000000000..bea506ff235f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-362 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 new file mode 100644 index 0000000000000..20dd081e0e406 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-363 @@ -0,0 +1 @@ +foo[][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 new file mode 100644 index 0000000000000..4803734b09eed --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-364 @@ -0,0 +1 @@ +foo[][1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 new file mode 100644 index 0000000000000..1be565985349f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-365 @@ -0,0 +1 @@ +foo[][0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 new file mode 100644 index 0000000000000..d2cf6da59fe06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-366 @@ -0,0 +1 @@ +foo[][2][2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 new file mode 100644 index 0000000000000..c609ca64b16e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-367 @@ -0,0 +1 @@ +foo[][0][0][100] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-368 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 new file mode 100644 index 0000000000000..bea506ff235f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-369 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 new file mode 100644 index 0000000000000..10ed5d3f61f69 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-37 @@ -0,0 +1 @@ +foo[?age <= `25`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 new file mode 100644 index 0000000000000..13f2c4a0b4faf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-370 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 new file mode 100644 index 0000000000000..edf3d92775d30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-371 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 new file mode 100644 index 0000000000000..2a3b993af24e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-372 @@ -0,0 +1 @@ +foo[].bar[].baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 new file mode 100644 index 0000000000000..d5ca878a13b79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-373 @@ -0,0 +1 @@ +string[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 new file mode 100644 index 0000000000000..fcd255f5d0c74 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-374 @@ -0,0 +1 @@ +hash[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 new file mode 100644 index 0000000000000..2d53bd7cdcdac --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-375 @@ -0,0 +1 @@ +number[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 new file mode 100644 index 0000000000000..cb10d2497e151 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-376 @@ -0,0 
+1 @@ +nullvalue[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 new file mode 100644 index 0000000000000..f6c79ca84ff99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-377 @@ -0,0 +1 @@ +string[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 new file mode 100644 index 0000000000000..09bf36e8a9002 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-378 @@ -0,0 +1 @@ +hash[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 new file mode 100644 index 0000000000000..4c357818919ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-379 @@ -0,0 +1 @@ +number[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 new file mode 100644 index 0000000000000..16a4c36acbdb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-38 @@ -0,0 +1 @@ +foo[?age < `20`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 new file mode 100644 index 0000000000000..2dd8ae218552d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-380 @@ -0,0 +1 @@ +nullvalue[].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 new file mode 100644 index 0000000000000..dfed8160389ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 @@ -0,0 +1 @@ +nullvalue[].foo[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 new file mode 100644 index 0000000000000..d7628e646e1c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-382 @@ -0,0 +1 @@ +`"foo"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 new file mode 100644 index 0000000000000..49c5269b12edc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-383 @@ -0,0 +1 @@ +`"\u03a6"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 new file mode 100644 index 0000000000000..d5db721d01201 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-384 @@ -0,0 +1 @@ +`"✓"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 new file mode 100644 index 0000000000000..a2b6e4ec85dfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-385 @@ -0,0 +1 @@ +`[1, 2, 3]` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 new file mode 100644 index 0000000000000..f5801bdd6803e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-386 @@ -0,0 +1 @@ +`{"a": "b"}` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 new file mode 100644 index 0000000000000..f87db59a8b749 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-387 @@ -0,0 +1 @@ +`true` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 new file mode 100644 index 0000000000000..3b20d905f334a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-388 @@ -0,0 +1 @@ +`false` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 new file mode 100644 index 0000000000000..70bcd29a7a8dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-389 @@ -0,0 +1 @@ +`null` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 new file mode 100644 index 0000000000000..351054d3e619c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-39 @@ -0,0 +1 @@ +foo[?age == `20`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 new file mode 100644 index 0000000000000..0918d41559bc9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-390 @@ -0,0 +1 @@ +`0` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 new file mode 100644 index 0000000000000..ef70c4c11e7e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-391 @@ -0,0 +1 @@ +`1` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 new file mode 100644 index 0000000000000..b39a922f40325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-392 @@ -0,0 +1 @@ +`2` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 new file mode 100644 index 0000000000000..7e65687dbfc1d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-393 @@ -0,0 +1 @@ +`3` \ 
No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 new file mode 100644 index 0000000000000..770d1ece7059e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-394 @@ -0,0 +1 @@ +`4` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 new file mode 100644 index 0000000000000..a8b81985c760e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-395 @@ -0,0 +1 @@ +`5` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 new file mode 100644 index 0000000000000..7f0861065e73d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-396 @@ -0,0 +1 @@ +`6` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 new file mode 100644 index 0000000000000..495114d919e5e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-397 @@ -0,0 +1 @@ +`7` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 new file mode 100644 index 0000000000000..94f355c46caaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-398 @@ -0,0 +1 @@ +`8` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 new file mode 100644 index 0000000000000..600d2aa3f49f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-399 @@ -0,0 +1 @@ +`9` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 new file mode 100644 index 0000000000000..3148522358058 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-4 @@ -0,0 +1 @@ +foo.bar.baz.bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 new file mode 100644 index 0000000000000..99d9258a625bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-40 @@ -0,0 +1 @@ +foo[?age != `20`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 new file mode 100644 index 0000000000000..637015b5fde4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-400 @@ -0,0 +1 @@ +`"foo\`bar"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 new file mode 100644 index 0000000000000..6fa7557b8d8b6 --- /dev/null 
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-401 @@ -0,0 +1 @@ +`"foo\"bar"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 new file mode 100644 index 0000000000000..5aabeec341936 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-402 @@ -0,0 +1 @@ +`"1\`"` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 new file mode 100644 index 0000000000000..8302ea1984db5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-403 @@ -0,0 +1 @@ +`"\\"`.{a:`"b"`} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 new file mode 100644 index 0000000000000..d88d014a96c64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-404 @@ -0,0 +1 @@ +`{"a": "b"}`.a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 new file mode 100644 index 0000000000000..47152dddb730d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-405 @@ -0,0 +1 @@ +`{"a": {"b": "c"}}`.a.b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 new file mode 100644 index 0000000000000..895d429387837 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406 @@ -0,0 +1 @@ +`[0, 1, 2]`[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 new file mode 100644 index 0000000000000..42500a368cc68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-407 @@ -0,0 +1 @@ +` {"foo": true}` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 new file mode 100644 index 0000000000000..08b944dad4a76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-408 @@ -0,0 +1 @@ +`{"foo": true} ` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 new file mode 100644 index 0000000000000..6de163f80bcc5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-409 @@ -0,0 +1 @@ +'foo' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 new file mode 100644 index 0000000000000..5bc357d9fa6bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-41 @@ -0,0 +1 @@ +foo[?top.name == 'a'] \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 new file mode 100644 index 0000000000000..b84bbdb29fb98 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-410 @@ -0,0 +1 @@ +' foo ' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 new file mode 100644 index 0000000000000..bf6a07ace3db4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-411 @@ -0,0 +1 @@ +'0' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 new file mode 100644 index 0000000000000..c742f5b0c9747 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-412 @@ -0,0 +1,2 @@ +'newline +' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 new file mode 100644 index 0000000000000..04e9b3ade60f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-413 @@ -0,0 +1,2 @@ +' +' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 new file mode 100644 index 0000000000000..ebdaf120d7b4e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-414 @@ -0,0 +1 @@ +'✓' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 new file mode 100644 index 0000000000000..d0ba5d7facb99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-415 @@ -0,0 +1 @@ +'𝄞' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 new file mode 100644 index 0000000000000..19c2e2ef41c35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-416 @@ -0,0 +1 @@ +' [foo] ' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 new file mode 100644 index 0000000000000..5faa483b197d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-417 @@ -0,0 +1 @@ +'[foo]' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 new file mode 100644 index 0000000000000..e3c05c163a36a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-418 @@ -0,0 +1 @@ +'\u03a6' \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 new file mode 100644 index 0000000000000..7c13861ac7278 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-419 @@ -0,0 +1 @@ +foo.{bar: bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 new file mode 100644 index 0000000000000..d037a0a4ddffa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-42 @@ -0,0 +1 @@ +foo[?top.first == top.last] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 new file mode 100644 index 0000000000000..f795c25521921 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-420 @@ -0,0 +1 @@ +foo.{"bar": bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 new file mode 100644 index 0000000000000..772c45639cc95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-421 @@ -0,0 +1 @@ +foo.{"foo.bar": bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 new file mode 100644 index 0000000000000..8808e92bf3145 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-422 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 new file mode 100644 index 0000000000000..3f13757a10c9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-423 @@ -0,0 +1 @@ +foo.{"bar": bar, "baz": baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 new file mode 100644 index 0000000000000..23cd8903e73b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-424 @@ -0,0 +1 @@ +{"baz": baz, "qux\"": "qux\""} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 new file mode 100644 index 0000000000000..fabb6da4fe87e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-425 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 new file mode 100644 index 0000000000000..4c3f615b1cda0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-426 @@ -0,0 +1 @@ +foo.{bar: bar,qux: qux} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 new file mode 100644 index 0000000000000..8bc46535ac5bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-427 @@ -0,0 +1 @@ +foo.{bar: bar, 
noexist: noexist} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 new file mode 100644 index 0000000000000..2024b6f118d70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-428 @@ -0,0 +1 @@ +foo.{noexist: noexist, alsonoexist: alsonoexist} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 new file mode 100644 index 0000000000000..b52191d10a49a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-429 @@ -0,0 +1 @@ +foo.badkey.{nokey: nokey, alsonokey: alsonokey} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 new file mode 100644 index 0000000000000..8534a5cae9f31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-43 @@ -0,0 +1 @@ +foo[?top == `{"first": "foo", "last": "bar"}`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 new file mode 100644 index 0000000000000..5cd310b6d243b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-430 @@ -0,0 +1 @@ +foo.nested.*.{a: a,b: b} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 new file mode 100644 index 0000000000000..0b24ef53526e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-431 @@ -0,0 +1 @@ +foo.nested.three.{a: a, cinner: c.inner} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 new file mode 100644 index 0000000000000..473c1c351b4c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-432 @@ -0,0 +1 @@ +foo.nested.three.{a: a, c: c.inner.bad.key} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 new file mode 100644 index 0000000000000..44ba735ab10f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-433 @@ -0,0 +1 @@ +foo.{a: nested.one.a, b: nested.two.b} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 new file mode 100644 index 0000000000000..f5f89b12b5caf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-434 @@ -0,0 +1 @@ +{bar: bar, baz: baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 new file mode 100644 index 0000000000000..697764cb306a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-435 @@ -0,0 
+1 @@ +{bar: bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 new file mode 100644 index 0000000000000..20447fb1054ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-436 @@ -0,0 +1 @@ +{otherkey: bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 new file mode 100644 index 0000000000000..310b9b1dd2703 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-437 @@ -0,0 +1 @@ +{no: no, exist: exist} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 new file mode 100644 index 0000000000000..c79b2e2406833 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-438 @@ -0,0 +1 @@ +foo.[bar] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 new file mode 100644 index 0000000000000..ab498ef654cd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-439 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 new file mode 100644 index 0000000000000..71307c4094772 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-44 @@ -0,0 +1 @@ +foo[?key == `true`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 new file mode 100644 index 0000000000000..4b8f39a468e44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-440 @@ -0,0 +1 @@ +foo.[bar,qux] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 new file mode 100644 index 0000000000000..b8f9020f8e793 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-441 @@ -0,0 +1 @@ +foo.[bar,noexist] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 new file mode 100644 index 0000000000000..b7c7b3f6556a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-442 @@ -0,0 +1 @@ +foo.[noexist,alsonoexist] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 new file mode 100644 index 0000000000000..fabb6da4fe87e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-443 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 new file mode 100644 index 0000000000000..c15c39f82c1ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-444 @@ -0,0 +1 @@ +foo.[bar,baz[0]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 new file mode 100644 index 0000000000000..9cebd89844f5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-445 @@ -0,0 +1 @@ +foo.[bar,baz[1]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 new file mode 100644 index 0000000000000..c5bbfbf848f0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-446 @@ -0,0 +1 @@ +foo.[bar,baz[2]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 new file mode 100644 index 0000000000000..d81cb2b90414e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-447 @@ -0,0 +1 @@ +foo.[bar,baz[3]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 new file mode 100644 index 0000000000000..3a65aa7d6f1fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-448 @@ -0,0 +1 @@ +foo.[bar[0],baz[3]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 new file mode 100644 index 0000000000000..8808e92bf3145 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-449 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 new file mode 100644 index 0000000000000..e142b22a25f56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-45 @@ -0,0 +1 @@ +foo[?key == `false`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 new file mode 100644 index 0000000000000..ab498ef654cd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-450 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 new file mode 100644 index 0000000000000..8e3d22dc53f02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-451 @@ -0,0 +1 @@ +foo.{bar: bar.baz[1],includeme: includeme} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 new file mode 100644 index 0000000000000..398c7f8b06ec6 
--- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-452 @@ -0,0 +1 @@ +foo.{"bar.baz.two": bar.baz[1].two, includeme: includeme} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 new file mode 100644 index 0000000000000..a17644487856e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-453 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].common] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 new file mode 100644 index 0000000000000..da5225ddccb01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-454 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].none] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 new file mode 100644 index 0000000000000..a8870b22bcc37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-455 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[].common] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 new file mode 100644 index 0000000000000..420b1a57c450d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-456 @@ -0,0 +1 @@ +reservations[*].instances[*].{id: id, name: name} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 new file mode 100644 index 0000000000000..0761ee16dc01f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-457 @@ -0,0 +1 @@ +reservations[].instances[].{id: id, name: name} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 new file mode 100644 index 0000000000000..aa1191a48cdf1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-458 @@ -0,0 +1 @@ +reservations[].instances[].[id, name] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-459 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 new file mode 100644 index 0000000000000..9a24a464eedb5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-46 @@ -0,0 +1 @@ +foo[?key == `0`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 new file mode 100644 index 0000000000000..bea506ff235f5 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-460 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 new file mode 100644 index 0000000000000..13f2c4a0b4faf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-461 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 new file mode 100644 index 0000000000000..edf3d92775d30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-462 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 new file mode 100644 index 0000000000000..d965466e91ce5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-463 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 new file mode 100644 index 0000000000000..f1822a1742b88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-464 @@ -0,0 +1 @@ +foo[].bar[].[baz] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 new file mode 100644 index 0000000000000..c6f77b80c33ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-465 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux][] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 new file mode 100644 index 0000000000000..db56262a4ea22 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-466 @@ -0,0 +1 @@ +foo.[baz[*].bar, qux[0]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 new file mode 100644 index 0000000000000..b901067d271d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-467 @@ -0,0 +1 @@ +foo.[baz[*].[bar, boo], qux[0]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 new file mode 100644 index 0000000000000..738479fa69bbf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-468 @@ -0,0 +1 @@ +foo.[baz[*].not_there || baz[*].bar, qux[0]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 new file mode 100644 index 0000000000000..6926996a7b6a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-469 @@ -0,0 +1 @@ +[[*],*] \ No newline 
at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 new file mode 100644 index 0000000000000..6d33cc72c4ede --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-47 @@ -0,0 +1 @@ +foo[?key == `1`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 new file mode 100644 index 0000000000000..736be0a31f20e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-470 @@ -0,0 +1 @@ +[[*]] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 new file mode 100644 index 0000000000000..29e1fb20a55b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-471 @@ -0,0 +1 @@ +outer.foo || outer.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 new file mode 100644 index 0000000000000..c0070ba783a11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-472 @@ -0,0 +1 @@ +outer.foo||outer.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 new file mode 100644 index 0000000000000..661b0bec5e376 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-473 @@ -0,0 +1 @@ +outer.bar || outer.baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 new file mode 100644 index 0000000000000..296d5aeee16a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-474 @@ -0,0 +1 @@ +outer.bar||outer.baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 new file mode 100644 index 0000000000000..ca140f8aa3300 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-475 @@ -0,0 +1 @@ +outer.bad || outer.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 new file mode 100644 index 0000000000000..15d30924274a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-476 @@ -0,0 +1 @@ +outer.bad||outer.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 new file mode 100644 index 0000000000000..56148d957b7d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-477 @@ -0,0 +1 @@ +outer.foo || outer.bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 new file mode 100644 index 0000000000000..6d3cf6d9069a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-478 @@ -0,0 +1 @@ +outer.foo||outer.bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 new file mode 100644 index 0000000000000..100fa8339d52f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-479 @@ -0,0 +1 @@ +outer.bad || outer.alsobad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 new file mode 100644 index 0000000000000..de56fc042cb70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-48 @@ -0,0 +1 @@ +foo[?key == `[0]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 new file mode 100644 index 0000000000000..64490352bf0e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-480 @@ -0,0 +1 @@ +outer.bad||outer.alsobad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 new file mode 100644 index 0000000000000..af901bde17ed5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-481 @@ -0,0 +1 @@ +outer.empty_string || outer.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 new file mode 100644 index 0000000000000..36b63e462c4c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-482 @@ -0,0 +1 @@ +outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 new file mode 100644 index 0000000000000..aba584f99ed6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-483 @@ -0,0 +1 @@ +foo.*.baz | [0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 new file mode 100644 index 0000000000000..4234ac019228f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-484 @@ -0,0 +1 @@ +foo.*.baz | [1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 new file mode 100644 index 0000000000000..12330d9902c8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-485 @@ -0,0 +1 @@ +foo.*.baz | [2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 new file mode 100644 index 0000000000000..1b2d93e1952d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-486 @@ -0,0 +1 @@ +foo.bar.* | [0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 new file mode 100644 index 0000000000000..c371fc6457a60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-487 @@ -0,0 +1 @@ +foo.*.notbaz | [*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 new file mode 100644 index 0000000000000..3c835642eb24d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-488 @@ -0,0 +1 @@ +foo | bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 new file mode 100644 index 0000000000000..decaa0421d633 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-489 @@ -0,0 +1 @@ +foo | bar | baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 new file mode 100644 index 0000000000000..49d9c63a3901f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-49 @@ -0,0 +1 @@ +foo[?key == `{"bar": [0]}`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 new file mode 100644 index 0000000000000..b91068037b1c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-490 @@ -0,0 +1 @@ +foo|bar| baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 new file mode 100644 index 0000000000000..11df74d8b4684 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-491 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 new file mode 100644 index 0000000000000..11df74d8b4684 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-492 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 new file mode 100644 index 0000000000000..37da9fc0b7144 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-493 @@ -0,0 +1 @@ +[foo.bar, foo.other] | [0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 new file mode 100644 index 0000000000000..1f4fc943d7092 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-494 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 new file mode 100644 index 0000000000000..67c7ea9cfe436 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-495 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 new file mode 100644 index 0000000000000..d87f9bba4cdc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-496 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | *.baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 new file mode 100644 index 0000000000000..ebf8e27114077 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-497 @@ -0,0 +1 @@ +foo.bam || foo.bar | baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 new file mode 100644 index 0000000000000..f32bc6db510ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-498 @@ -0,0 +1 @@ +foo | not_there || bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 new file mode 100644 index 0000000000000..d04459d9090f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-499 @@ -0,0 +1 @@ +foo[*].bar[*] | [0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 new file mode 100644 index 0000000000000..b537264a1d529 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-5 @@ -0,0 +1 @@ +foo.bar.bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 new file mode 100644 index 0000000000000..c17c1df17063d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-50 @@ -0,0 +1 @@ +foo[?key == `null`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 new file mode 100644 index 0000000000000..3eb869f435b64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-500 @@ -0,0 +1 @@ +bar[0:10] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 new file mode 100644 index 0000000000000..aa5d6be52c7de --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-501 @@ -0,0 +1 @@ +foo[0:10:1] \ No 
newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 new file mode 100644 index 0000000000000..1a4d1682da0f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-502 @@ -0,0 +1 @@ +foo[0:10] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 new file mode 100644 index 0000000000000..5925a578b52a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-503 @@ -0,0 +1 @@ +foo[0:10:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 new file mode 100644 index 0000000000000..081e93abd9805 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-504 @@ -0,0 +1 @@ +foo[0::1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 new file mode 100644 index 0000000000000..922700149593b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-505 @@ -0,0 +1 @@ +foo[0::] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 new file mode 100644 index 0000000000000..fd2294d6646bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-506 @@ -0,0 +1 @@ +foo[0:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 new file mode 100644 index 0000000000000..c6b551d5eaed5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-507 @@ -0,0 +1 @@ +foo[:10:1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 new file mode 100644 index 0000000000000..503f58da6e301 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-508 @@ -0,0 +1 @@ +foo[::1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 new file mode 100644 index 0000000000000..f78bb770caa01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-509 @@ -0,0 +1 @@ +foo[:10:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 new file mode 100644 index 0000000000000..589a214f45351 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-51 @@ -0,0 +1 @@ +foo[?key == `[1]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 new file mode 100644 index 
0000000000000..eb9d2ba881ed7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-510 @@ -0,0 +1 @@ +foo[::] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 new file mode 100644 index 0000000000000..1921a3d9865c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-511 @@ -0,0 +1 @@ +foo[:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 new file mode 100644 index 0000000000000..a87afcb1b3200 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-512 @@ -0,0 +1 @@ +foo[1:9] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 new file mode 100644 index 0000000000000..dbf51d8cde4f9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-513 @@ -0,0 +1 @@ +foo[0:10:2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 new file mode 100644 index 0000000000000..f7288763a71af --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-514 @@ -0,0 +1 @@ +foo[5:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 new file mode 100644 index 0000000000000..64395761df696 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-515 @@ -0,0 +1 @@ +foo[5::2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 new file mode 100644 index 0000000000000..706bb14dd7e35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-516 @@ -0,0 +1 @@ +foo[::2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 new file mode 100644 index 0000000000000..8fcfaee95c675 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-517 @@ -0,0 +1 @@ +foo[::-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 new file mode 100644 index 0000000000000..f6a00bf9bbc6f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-518 @@ -0,0 +1 @@ +foo[1::2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 new file mode 100644 index 0000000000000..ea068ee06fb1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-519 @@ -0,0 +1 @@ +foo[10:0:-1] \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 new file mode 100644 index 0000000000000..214917ac068e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-52 @@ -0,0 +1 @@ +foo[?key == `{"a":2}`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 new file mode 100644 index 0000000000000..1fe14258edcf2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-520 @@ -0,0 +1 @@ +foo[10:5:-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 new file mode 100644 index 0000000000000..4ba0e13022832 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-521 @@ -0,0 +1 @@ +foo[8:2:-2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 new file mode 100644 index 0000000000000..25db439ff7d53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-522 @@ -0,0 +1 @@ +foo[0:20] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 new file mode 100644 index 0000000000000..8a965920af5a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-523 @@ -0,0 +1 @@ +foo[10:-20:-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 new file mode 100644 index 0000000000000..b1e5ba3734fd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-524 @@ -0,0 +1 @@ +foo[10:-20] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 new file mode 100644 index 0000000000000..06253112e4e7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-525 @@ -0,0 +1 @@ +foo[-4:-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 new file mode 100644 index 0000000000000..1e14a6a4c5e03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-526 @@ -0,0 +1 @@ +foo[:-5:-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 new file mode 100644 index 0000000000000..aef5c2747d277 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-527 @@ -0,0 +1 @@ +foo[:2].a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 new file mode 100644 index 
0000000000000..93c95fcf68945 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-528 @@ -0,0 +1 @@ +foo[:2].b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 new file mode 100644 index 0000000000000..7e0733e595e6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-529 @@ -0,0 +1 @@ +foo[:2].a.b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 new file mode 100644 index 0000000000000..4c002ed80d69a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-53 @@ -0,0 +1 @@ +foo[?`true` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 new file mode 100644 index 0000000000000..2438b25762d1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-530 @@ -0,0 +1 @@ +bar[::-1].a.b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 new file mode 100644 index 0000000000000..549994b6bc4aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-531 @@ -0,0 +1 @@ +bar[:2].a.b \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 new file mode 100644 index 0000000000000..ab98292b452ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-532 @@ -0,0 +1 @@ +baz[:2].a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 new file mode 100644 index 0000000000000..65fca9687646a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-533 @@ -0,0 +1 @@ +[:] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 new file mode 100644 index 0000000000000..18c5daf7bea9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-534 @@ -0,0 +1 @@ +[:2].a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 new file mode 100644 index 0000000000000..1bb84f7d4bd02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-535 @@ -0,0 +1 @@ +[::-1].a \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 new file mode 100644 index 0000000000000..7a0416f0566b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-536 @@ -0,0 +1 @@ +[:2].b \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 new file mode 100644 index 0000000000000..4d5f9756e551b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-537 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-538 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 new file mode 100644 index 0000000000000..f59ec20aabf58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-539 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 new file mode 100644 index 0000000000000..23d27073e9794 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-54 @@ -0,0 +1 @@ +foo[?`false` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 new file mode 100644 index 0000000000000..dee5695746e35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-540 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 new file mode 100644 index 0000000000000..1a16f7418d2bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-541 @@ -0,0 +1 @@ +*.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 new file mode 100644 index 0000000000000..7e8066d39f51a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-542 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 new file mode 100644 index 0000000000000..0637a088a01e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-543 @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 new file mode 100644 index 0000000000000..6e7ea636eec7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-544 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 new file mode 100644 index 0000000000000..5a5194647ade1 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-545 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 new file mode 100644 index 0000000000000..416127425c2de --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-546 @@ -0,0 +1 @@ +*.["0"] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 new file mode 100644 index 0000000000000..cd9fb6ba77880 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-547 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 new file mode 100644 index 0000000000000..9f3ada4807792 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-548 @@ -0,0 +1 @@ +[*][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 new file mode 100644 index 0000000000000..9b0b2f8189b81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-549 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 new file mode 100644 index 0000000000000..6d840ee568ff9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-55 @@ -0,0 +1 @@ +foo[?`0` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 new file mode 100644 index 0000000000000..b23413b92af1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-550 @@ -0,0 +1 @@ +foo.[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 new file mode 100644 index 0000000000000..08ab2e1c4205c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-551 @@ -0,0 +1 @@ +foo.[abc] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 new file mode 100644 index 0000000000000..78b05a5c64e23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-552 @@ -0,0 +1 @@ +foo.[abc, def] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 new file mode 100644 index 0000000000000..1e7b886e79eb2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-553 @@ -0,0 +1 @@ +a.{foo: bar} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 new file mode 100644 index 0000000000000..91b4c9896e16d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-554 @@ -0,0 +1 @@ +a.{foo: bar, baz: bam} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 new file mode 100644 index 0000000000000..8301ef981efd9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-555 @@ -0,0 +1 @@ +{"\\":{" ":*}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 new file mode 100644 index 0000000000000..8f75cc9133438 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-556 @@ -0,0 +1 @@ +foo || bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 new file mode 100644 index 0000000000000..e5f122c569164 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-557 @@ -0,0 +1 @@ +foo.[a || b] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 new file mode 100644 index 0000000000000..39d1914328bba --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-558 @@ -0,0 +1 @@ +foo[?bar==`"baz"`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 new file mode 100644 index 0000000000000..d08bbe2501e52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-559 @@ -0,0 +1 @@ +foo[? 
bar == `"baz"` ] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 new file mode 100644 index 0000000000000..addaf204c5bdb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-56 @@ -0,0 +1 @@ +foo[?`1` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 new file mode 100644 index 0000000000000..a77f355812c45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-560 @@ -0,0 +1 @@ +foo[?a.b.c==d.e.f] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 new file mode 100644 index 0000000000000..c9697aa48707b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-561 @@ -0,0 +1 @@ +foo[?bar==`[0, 1, 2]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 new file mode 100644 index 0000000000000..fd7064a08688f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-562 @@ -0,0 +1 @@ +foo[?bar==`["a", "b", "c"]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 new file mode 100644 index 0000000000000..61e5e1b8f716a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-563 @@ -0,0 +1 @@ +foo[?bar==`["foo\`bar"]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 new file mode 100644 index 0000000000000..bc9d8af1d9257 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-564 @@ -0,0 +1 @@ +[?"\\">`"foo"`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 new file mode 100644 index 0000000000000..2dd54dc390425 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-565 @@ -0,0 +1 @@ +[?"\\" > `"foo"`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-566 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 new file mode 100644 index 0000000000000..7e9668e7834c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-567 @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 new file mode 100644 index 0000000000000..d58ac16bf0304 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-568 @@ -0,0 +1 @@ +"\\" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 new file mode 100644 index 0000000000000..33ac9fba6ff90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-569 @@ -0,0 +1 @@ +*||*|*|* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 new file mode 100644 index 0000000000000..acf2435c7cbd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-57 @@ -0,0 +1 @@ +foo[?`[0]` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 new file mode 100644 index 0000000000000..99e19638ce40c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-570 @@ -0,0 +1 @@ +*[]||[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 new file mode 100644 index 0000000000000..be0845011c3f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-571 @@ -0,0 +1 @@ +[*.*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 new file mode 100644 index 0000000000000..a84b51e1cda05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-572 @@ -0,0 +1 @@ +foo[]."✓" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 new file mode 100644 index 0000000000000..c2de55815f09a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-573 @@ -0,0 +1 @@ +"☯" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 new file mode 100644 index 0000000000000..dc2dda0bb68bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-574 @@ -0,0 +1 @@ +"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 new file mode 100644 index 0000000000000..a2d3d5f6ae0b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-575 @@ -0,0 +1 @@ +"☃" \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 new file mode 100644 index 0000000000000..0971c37eaf77c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-576 @@ -0,0 
+1 @@ +foo.*.baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 new file mode 100644 index 0000000000000..0e39dfd695d68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-577 @@ -0,0 +1 @@ +foo.bar.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 new file mode 100644 index 0000000000000..89c1ce22dc7e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-578 @@ -0,0 +1 @@ +foo.*.notbaz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 new file mode 100644 index 0000000000000..5199b9f95d4c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-579 @@ -0,0 +1 @@ +foo.*.notbaz[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 new file mode 100644 index 0000000000000..99fe382c6c6eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-58 @@ -0,0 +1 @@ +foo[?`{"bar": [0]}` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 new file mode 100644 index 0000000000000..5bb6d4ae78301 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-580 @@ -0,0 +1 @@ +foo.*.notbaz[-1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 new file mode 100644 index 0000000000000..edac73189d76d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-581 @@ -0,0 +1 @@ +foo.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 new file mode 100644 index 0000000000000..458d0a6ddd090 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-582 @@ -0,0 +1 @@ +foo.*.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 new file mode 100644 index 0000000000000..f757fd5345921 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-583 @@ -0,0 +1 @@ +foo.*.*.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 new file mode 100644 index 0000000000000..670049d96f756 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-584 @@ -0,0 +1 @@ +foo.*.*.*.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 new file mode 100644 index 0000000000000..3c88caafe887a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-585 @@ -0,0 +1 @@ +*.bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 new file mode 100644 index 0000000000000..f59ec20aabf58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-586 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 new file mode 100644 index 0000000000000..0852fcc78954a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-587 @@ -0,0 +1 @@ +*.sub1 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 new file mode 100644 index 0000000000000..dee5695746e35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-588 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 new file mode 100644 index 0000000000000..66781bba40ab3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-589 @@ -0,0 +1 @@ +*.*.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 new file mode 100644 index 0000000000000..4aad20ae69e0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-59 @@ -0,0 +1 @@ +foo[?`null` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 new file mode 100644 index 0000000000000..0db15d97e447b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-590 @@ -0,0 +1 @@ +*.sub1.foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 new file mode 100644 index 0000000000000..b24be9d7d1c92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-591 @@ -0,0 +1 @@ +foo[*].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 new file mode 100644 index 0000000000000..e6efe133fcdf5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-592 @@ -0,0 +1 @@ +foo[*].notbar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 new file mode 100644 index 0000000000000..5a5194647ade1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-593 @@ -0,0 +1 @@ 
+[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 new file mode 100644 index 0000000000000..cd9fb6ba77880 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-594 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 new file mode 100644 index 0000000000000..cbf1a5d596a8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-595 @@ -0,0 +1 @@ +[*].notbar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 new file mode 100644 index 0000000000000..8bd13b7eb493d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596 @@ -0,0 +1 @@ +foo.bar[*].baz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 new file mode 100644 index 0000000000000..7239f3e887e77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-597 @@ -0,0 +1 @@ +foo.bar[*].baz[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 new file mode 100644 index 0000000000000..f5e431d9e3fe5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-598 @@ -0,0 +1 @@ +foo.bar[*].baz[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 new file mode 100644 index 0000000000000..d0c25953968fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-599 @@ -0,0 +1 @@ +foo.bar[*].baz[2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 new file mode 100644 index 0000000000000..b9749b748972c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-6 @@ -0,0 +1 @@ +foo.bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 new file mode 100644 index 0000000000000..dac67509bb4d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-60 @@ -0,0 +1 @@ +foo[?`[1]` == key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 new file mode 100644 index 0000000000000..a6388271e411c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-600 @@ -0,0 +1 @@ +foo.bar[*].baz[3] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 
new file mode 100644 index 0000000000000..2a66ffe93b8e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-601 @@ -0,0 +1 @@ +foo.bar[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 new file mode 100644 index 0000000000000..b6b3695438aaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-602 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 new file mode 100644 index 0000000000000..7e57f9e74ce84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-603 @@ -0,0 +1 @@ +foo.bar[0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 new file mode 100644 index 0000000000000..c5f8bef0bee32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-604 @@ -0,0 +1 @@ +foo.bar[0][0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 new file mode 100644 index 0000000000000..3decf08030faa --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-605 @@ -0,0 +1 @@ +foo.bar[0][0][0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 new file mode 100644 index 0000000000000..655e2959beabb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-606 @@ -0,0 +1 @@ +foo[0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 new file mode 100644 index 0000000000000..2aa159718c84b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-607 @@ -0,0 +1 @@ +foo[*].bar[*].kind \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 new file mode 100644 index 0000000000000..556b380bab101 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-608 @@ -0,0 +1 @@ +foo[*].bar[0].kind \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 new file mode 100644 index 0000000000000..0de3229b83e91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-609 @@ -0,0 +1 @@ +foo[*].bar.kind \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 new file mode 100644 index 0000000000000..130ed3b379e86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-61 @@ -0,0 +1 @@ +foo[?`{"a":2}` == key] 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 new file mode 100644 index 0000000000000..3b511f13351d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-610 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 new file mode 100644 index 0000000000000..c8dfa16e6bcdc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-611 @@ -0,0 +1 @@ +foo[*].bar[1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 new file mode 100644 index 0000000000000..69f04ee23ff18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-612 @@ -0,0 +1 @@ +foo[*].bar[2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 new file mode 100644 index 0000000000000..3b511f13351d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-613 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 new file mode 100644 index 0000000000000..03e0c0cb93b3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-614 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 new file mode 100644 index 0000000000000..ac1c896682132 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-615 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 new file mode 100644 index 0000000000000..03e0c0cb93b3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-616 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 new file mode 100644 index 0000000000000..ac1c896682132 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-617 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 new file mode 100644 index 0000000000000..6494cf1c6a060 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-618 @@ -0,0 +1 @@ +foo[*][0][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 new file mode 100644 
index 0000000000000..1406be5721fb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-619 @@ -0,0 +1 @@ +foo[*][1][0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 new file mode 100644 index 0000000000000..3d15fcc169789 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-62 @@ -0,0 +1 @@ +foo[?key != `true`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 new file mode 100644 index 0000000000000..72b5aa281a94e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-620 @@ -0,0 +1 @@ +foo[*][0][1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 new file mode 100644 index 0000000000000..02a26491ae4e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-621 @@ -0,0 +1 @@ +foo[*][1][1] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 new file mode 100644 index 0000000000000..cb08037e20925 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-622 @@ -0,0 +1 @@ +foo[*][2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 new file mode 100644 index 0000000000000..91d695995a41c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-623 @@ -0,0 +1 @@ +foo[*][2][2] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 new file mode 100644 index 0000000000000..f40f261ad1730 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-624 @@ -0,0 +1 @@ +bar[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 new file mode 100644 index 0000000000000..03904b1dece44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-625 @@ -0,0 +1 @@ +bar[*].baz[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 new file mode 100644 index 0000000000000..fd7c21c340c7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-626 @@ -0,0 +1 @@ +string[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 new file mode 100644 index 0000000000000..d7ca4719a9917 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-627 @@ -0,0 +1 @@ +hash[*] \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 new file mode 100644 index 0000000000000..b3ddffe3cfebc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-628 @@ -0,0 +1 @@ +number[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 new file mode 100644 index 0000000000000..c03cd39ebf589 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-629 @@ -0,0 +1 @@ +nullvalue[*] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 new file mode 100644 index 0000000000000..08731af69083d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-63 @@ -0,0 +1 @@ +foo[?key != `false`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 new file mode 100644 index 0000000000000..b3c40cd533f4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-630 @@ -0,0 +1 @@ +string[*].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 new file mode 100644 index 0000000000000..c5930d543f3ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-631 @@ -0,0 +1 @@ +hash[*].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 new file mode 100644 index 0000000000000..cc0b1a4896af3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-632 @@ -0,0 +1 @@ +number[*].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 new file mode 100644 index 0000000000000..d677b96585206 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-633 @@ -0,0 +1 @@ +nullvalue[*].foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 new file mode 100644 index 0000000000000..c116664015506 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-634 @@ -0,0 +1 @@ +nullvalue[*].foo[*].bar \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 new file mode 100644 index 0000000000000..e339977108874 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-635 @@ -0,0 +1 @@ +string.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 new file mode 100644 index 
0000000000000..76f53453a8a4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-636 @@ -0,0 +1 @@ +hash.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 new file mode 100644 index 0000000000000..dd485072f26a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-637 @@ -0,0 +1 @@ +number.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 new file mode 100644 index 0000000000000..16000c003cd4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-638 @@ -0,0 +1 @@ +array.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 new file mode 100644 index 0000000000000..1d0d03ed3b5e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-639 @@ -0,0 +1 @@ +nullvalue.* \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 new file mode 100644 index 0000000000000..b67aebe98ada4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-64 @@ -0,0 +1 @@ +foo[?key != `0`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 new file mode 100644 index 0000000000000..7e8066d39f51a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-640 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 new file mode 100644 index 0000000000000..41ebe5ba9d7b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-641 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 new file mode 100644 index 0000000000000..fe0397993c530 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-642 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 new file mode 100644 index 0000000000000..1a27fd80c3c38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-643 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 new file mode 100644 index 0000000000000..559a13456be1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-644 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 new file mode 100644 index 0000000000000..e31621b438eb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-645 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 new file mode 100644 index 0000000000000..6bf7a10362a77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-646 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 new file mode 100644 index 0000000000000..41ebe5ba9d7b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-647 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 new file mode 100644 index 0000000000000..28b9bcbbb52fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-648 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 new file mode 100644 index 0000000000000..41ebe5ba9d7b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-649 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 new file mode 100644 index 0000000000000..d3ac793bb6b4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-65 @@ -0,0 +1 @@ +foo[?key != `1`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 new file mode 100644 index 0000000000000..fe0397993c530 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-650 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 new file mode 100644 index 0000000000000..1a27fd80c3c38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-651 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 new file mode 100644 index 0000000000000..559a13456be1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-652 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 new file mode 100644 index 0000000000000..e31621b438eb0 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-653 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 new file mode 100644 index 0000000000000..6bf7a10362a77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-654 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 new file mode 100644 index 0000000000000..41ebe5ba9d7b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-655 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 new file mode 100644 index 0000000000000..28b9bcbbb52fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-656 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 new file mode 100644 index 0000000000000..065295bc17ac1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-66 @@ -0,0 +1 @@ +foo[?key != `null`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 new file mode 100644 index 0000000000000..43d164927d43d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-67 @@ -0,0 +1 @@ +foo[?key != `[1]`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 new file mode 100644 index 0000000000000..6b884fa866f06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-68 @@ -0,0 +1 @@ +foo[?key != `{"a":2}`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 new file mode 100644 index 0000000000000..d85c779d0a3b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-69 @@ -0,0 +1 @@ +foo[?`true` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 new file mode 100644 index 0000000000000..44d6628cdc698 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-7 @@ -0,0 +1 @@ +bad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 new file mode 100644 index 0000000000000..3e6dcf3045a20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-70 @@ -0,0 +1 @@ +foo[?`false` != key] \ No newline at end of file diff --git 
a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 new file mode 100644 index 0000000000000..bdb820b30b332 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-71 @@ -0,0 +1 @@ +foo[?`0` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 new file mode 100644 index 0000000000000..3f3048a004c2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-72 @@ -0,0 +1 @@ +foo[?`1` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 new file mode 100644 index 0000000000000..dacc25724510d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-73 @@ -0,0 +1 @@ +foo[?`null` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 new file mode 100644 index 0000000000000..32ebae8800b76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-74 @@ -0,0 +1 @@ +foo[?`[1]` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 new file mode 100644 index 0000000000000..dcd023e0fbc29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-75 @@ -0,0 +1 @@ +foo[?`{"a":2}` != key] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 new file mode 100644 index 0000000000000..e08cc13cbe326 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-76 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 new file mode 100644 index 0000000000000..1ec43f45fce67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-77 @@ -0,0 +1 @@ +reservations[*].instances[?bar==`1`] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 new file mode 100644 index 0000000000000..3038711634999 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-78 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`][] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 new file mode 100644 index 0000000000000..e3875746b35b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-79 @@ -0,0 +1 @@ +foo[?bar==`1`].bar[0] \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 new file mode 100644 index 0000000000000..da7bc1ccfd098 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-8 @@ -0,0 +1 @@ +bad.morebad.morebad \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 new file mode 100644 index 0000000000000..5c3d683565d98 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-80 @@ -0,0 +1 @@ +foo[?a==`1`].b.c \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 new file mode 100644 index 0000000000000..6232808f0aece --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-81 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 new file mode 100644 index 0000000000000..6232808f0aece --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-82 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 new file mode 100644 index 0000000000000..29497f4ff5436 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-83 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 new file mode 100644 index 0000000000000..29497f4ff5436 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-84 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 new file mode 100644 index 0000000000000..346696563f885 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-85 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 new file mode 100644 index 0000000000000..346696563f885 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-86 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 new file mode 100644 index 0000000000000..c6268f8473be1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-87 @@ -0,0 +1 @@ +avg(numbers) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 new file mode 100644 index 0000000000000..7ce703695ee07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-88 @@ -0,0 
+1 @@ +ceil(`1.2`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 new file mode 100644 index 0000000000000..0561bc26d9a0c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-89 @@ -0,0 +1 @@ +ceil(decimals[0]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 new file mode 100644 index 0000000000000..19102815663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-9 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 new file mode 100644 index 0000000000000..c78c1fc300231 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-90 @@ -0,0 +1 @@ +ceil(decimals[1]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 new file mode 100644 index 0000000000000..ebcb4bbdbb9bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-91 @@ -0,0 +1 @@ +ceil(decimals[2]) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 new file mode 100644 index 0000000000000..6edbf1afe4a5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-92 @@ -0,0 +1 @@ +contains('abc', 'a') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 new file mode 100644 index 0000000000000..d2b2f070dabd5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-93 @@ -0,0 +1 @@ +contains('abc', 'd') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 new file mode 100644 index 0000000000000..3535da2eca689 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-94 @@ -0,0 +1 @@ +contains(strings, 'a') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 new file mode 100644 index 0000000000000..ba839fe60d12f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-95 @@ -0,0 +1 @@ +contains(decimals, `1.2`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 new file mode 100644 index 0000000000000..f4358186979c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-96 @@ -0,0 +1 @@ +contains(decimals, `false`) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 new file mode 100644 index 0000000000000..adb65fc01f003 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-97 @@ -0,0 +1 @@ +ends_with(str, 'r') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 new file mode 100644 index 0000000000000..93d6901beadd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-98 @@ -0,0 +1 @@ +ends_with(str, 'tr') \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go new file mode 100644 index 0000000000000..c7df08782a12d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. +func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000000000..13c74604c2c8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
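+// For example (illustrative only): parsing "foo.bar" with NewParser().Parse
+// and handing the resulting ASTNode to Execute together with
+// map[string]interface{}{"foo": map[string]interface{}{"bar": 1}} yields 1.
+// The exported Search helper exercised in the tests below wraps this
+// parse-then-Execute sequence in a single call.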
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter_test.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter_test.go new file mode 100644 index 0000000000000..5b529c4f313b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter_test.go @@ -0,0 +1,213 @@ +package jmespath + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +type scalars struct { + Foo string + Bar string +} + +type sliceType struct { + A string + B []scalars + C []*scalars +} + +type benchmarkStruct struct { + Fooasdfasdfasdfasdf string +} + +type benchmarkNested 
struct { + Fooasdfasdfasdfasdf nestedA +} + +type nestedA struct { + Fooasdfasdfasdfasdf nestedB +} + +type nestedB struct { + Fooasdfasdfasdfasdf nestedC +} + +type nestedC struct { + Fooasdfasdfasdfasdf string +} + +type nestedSlice struct { + A []sliceType +} + +func TestCanSupportEmptyInterface(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + result, err := Search("foo", data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestCanSupportUserDefinedStructsValue(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportUserDefinedStructsRef(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceAll(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlicingExpression(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[:].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithFilterProjection(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[? `true` ].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlice(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("B[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithOrExpressions(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: nil} + result, err := Search("C || A", data) + assert.Nil(err) + assert.Equal("foo", result) +} + +func TestCanSupportStructWithSlicePointer(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: []*scalars{&scalars{"f1", "b1"}, &scalars{"correct", "b2"}}} + result, err := Search("C[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + // Note that there's a lower cased "foo" instead of "Foo", + // but it should still correspond to the Foo field in the + // scalars struct + result, err := Search("foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceLowerCased(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} + result, err := Search("b[-1].foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithNestedPointers(t *testing.T) { + assert := assert.New(t) + data := struct{ A *struct{ B int } }{} + result, err := Search("A.B", data) + assert.Nil(err) + assert.Nil(result) +} + +func TestCanSupportFlattenNestedSlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {B: 
[]scalars{{Foo: "f1a"}, {Foo: "f1b"}}}, + {B: []scalars{{Foo: "f2a"}, {Foo: "f2b"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1a", "f1b", "f2a", "f2b"}, result) +} + +func TestCanSupportFlattenNestedEmptySlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {}, {B: []scalars{{Foo: "a"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"a"}, result) +} + +func TestCanSupportProjectionsWithStructs(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {A: "first"}, {A: "second"}, {A: "third"}, + }} + result, err := Search("A[*].A", data) + assert.Nil(err) + assert.Equal([]interface{}{"first", "second", "third"}, result) +} + +func BenchmarkInterpretSingleFieldStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf") + data := benchmarkStruct{"foobarbazqux"} + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + data := benchmarkNested{ + nestedA{ + nestedB{ + nestedC{"foobarbazqux"}, + }, + }, + } + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedMaps(b *testing.B) { + jsonData := []byte(`{"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": "foobarbazqux"}}}}`) + var data interface{} + json.Unmarshal(jsonData, &data) + + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + for i := 0; i < b.N; i++ { + intr.Execute(ast, data) + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000000000..817900c8f5296 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
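+// For example (illustrative only): a SyntaxError with Expression "foo[" and
+// Offset 4 renders as the expression on one line followed by "    ^" on the
+// next, placing the caret at the reported offset.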
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. 
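+// (Illustrative note: in the tokenize loop above, '|' is dispatched as
+// matchOrElse(r, '|', tOr, tPipe), so the two-char "||" yields tOr and a
+// lone "|" yields tPipe.)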
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer_test.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer_test.go new file mode 100644 index 0000000000000..7a9a9ee24bf87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer_test.go @@ -0,0 +1,161 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var lexingTests = []struct { + expression string + expected []token +}{ + {"*", []token{token{tStar, "*", 0, 1}}}, + {".", []token{token{tDot, ".", 0, 1}}}, + {"[?", []token{token{tFilter, "[?", 0, 2}}}, + {"[]", []token{token{tFlatten, "[]", 0, 2}}}, + {"(", []token{token{tLparen, "(", 0, 1}}}, + {")", []token{token{tRparen, ")", 0, 1}}}, + {"[", []token{token{tLbracket, "[", 0, 1}}}, + {"]", []token{token{tRbracket, "]", 0, 1}}}, + {"{", []token{token{tLbrace, "{", 0, 1}}}, + {"}", []token{token{tRbrace, "}", 0, 1}}}, + {"||", []token{token{tOr, "||", 0, 2}}}, + {"|", []token{token{tPipe, "|", 0, 1}}}, + {"29", []token{token{tNumber, "29", 0, 2}}}, + {"2", []token{token{tNumber, "2", 0, 1}}}, + {"0", []token{token{tNumber, "0", 0, 1}}}, + {"-20", []token{token{tNumber, "-20", 0, 3}}}, + {"foo", []token{token{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{token{tQuotedIdentifier, "bar", 0, 3}}}, + // Escaping the delimiter + {`"bar\"baz"`, []token{token{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{token{tComma, ",", 0, 1}}}, + {":", []token{token{tColon, ":", 0, 1}}}, + {"<", []token{token{tLT, "<", 0, 1}}}, + {"<=", []token{token{tLTE, "<=", 0, 2}}}, + {">", []token{token{tGT, ">", 0, 1}}}, + {">=", []token{token{tGTE, ">=", 0, 2}}}, + {"==", []token{token{tEQ, "==", 0, 2}}}, + {"!=", []token{token{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{token{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{token{tStringLiteral, "foo", 1, 3}}}, + {"'a'", []token{token{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{token{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{token{tCurrent, "@", 0, 1}}}, + {"&", []token{token{tExpref, "&", 0, 1}}}, + // Quoted identifier unicode escape sequences + {`"\u2713"`, []token{token{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{token{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{token{tJSONLiteral, "\"foo\"", 1, 5}}}, + // Combinations of tokens. 
+ {"foo.bar", []token{ + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tDot, ".", 3, 1}, + token{tUnquotedIdentifier, "bar", 4, 3}, + }}, + {"foo[0]", []token{ + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tLbracket, "[", 3, 1}, + token{tNumber, "0", 4, 1}, + token{tRbracket, "]", 5, 1}, + }}, + {"foo[?a 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. +func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return 
p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. 
+ if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + 
expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p 
*Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser_test.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser_test.go new file mode 100644 index 0000000000000..997a0f4d7c141 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser_test.go @@ -0,0 +1,136 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var parsingErrorTests = []struct { + expression string + msg string +}{ + {"foo.", "Incopmlete expression"}, + {"[foo", "Incopmlete expression"}, + {"]", "Invalid"}, + {")", "Invalid"}, + {"}", "Invalid"}, + {"foo..bar", "Invalid"}, + {`foo."bar`, "Forwards lexer errors"}, + {`{foo: bar`, "Incomplete expression"}, + {`{foo bar}`, "Invalid"}, + {`[foo bar]`, "Invalid"}, + {`foo@`, "Invalid"}, + {`&&&&&&&&&&&&t(`, "Invalid"}, + {`[*][`, "Invalid"}, +} + +func TestParsingErrors(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + for _, tt := range parsingErrorTests { + _, err := parser.Parse(tt.expression) + assert.NotNil(err, fmt.Sprintf("Expected parsing error: %s, for expression: %s", tt.msg, tt.expression)) + } +} + +var prettyPrinted = `ASTProjection { + children: { + ASTField { + value: "foo" + } + ASTSubexpression { + children: { + ASTSubexpression { + children: { + ASTField { + value: "bar" + } + ASTField { + value: "baz" + } + } + ASTField { + value: "qux" + } + } +} +` + +var prettyPrintedCompNode = `ASTFilterProjection { + children: { + ASTField { + value: "a" + } + ASTIdentity { + } + ASTComparator { + value: tLTE + children: { + ASTField { + value: "b" + } + ASTField { + value: "c" + } + } +} +` + +func TestPrettyPrintedAST(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("foo[*].bar.baz.qux") + assert.Equal(parsed.PrettyPrint(0), prettyPrinted) +} + +func TestPrettyPrintedCompNode(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("a[?b<=c]") + assert.Equal(parsed.PrettyPrint(0), prettyPrintedCompNode) +} + +func BenchmarkParseIdentifier(b *testing.B) { + runParseBenchmark(b, exprIdentifier) +} + +func BenchmarkParseSubexpression(b *testing.B) { + runParseBenchmark(b, exprSubexpr) +} + +func BenchmarkParseDeeplyNested50(b *testing.B) { + runParseBenchmark(b, deeplyNested50) +} + +func BenchmarkParseDeepNested50Pipe(b *testing.B) { + runParseBenchmark(b, deeplyNested50Pipe) +} + +func BenchmarkParseDeepNested50Index(b *testing.B) { + runParseBenchmark(b, deeplyNested50Index) +} + +func BenchmarkParseQuotedIdentifier(b *testing.B) { + runParseBenchmark(b, exprQuotedIdentifier) +} + +func BenchmarkParseQuotedIdentifierEscapes(b *testing.B) { + runParseBenchmark(b, quotedIdentifierEscapes) +} + +func 
BenchmarkParseRawStringLiteral(b *testing.B) { + runParseBenchmark(b, rawStringLiteral) +} + +func BenchmarkParseDeepProjection104(b *testing.B) { + runParseBenchmark(b, deepProjection104) +} + +func runParseBenchmark(b *testing.B, expression string) { + parser := NewParser() + for i := 0; i < b.N; i++ { + parser.Parse(expression) + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000000000..dae79cbdf338e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000000000..ddc1b7d7d4600 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. +// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. 
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util_test.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util_test.go new file mode 100644 index 0000000000000..1e2cd93520627 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util_test.go @@ -0,0 +1,73 @@ +package jmespath + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSlicePositiveStep(t *testing.T) { + assert := assert.New(t) + input := make([]interface{}, 5) + input[0] = 0 + input[1] = 1 + input[2] = 2 + input[3] = 3 + input[4] = 4 + result, err := slice(input, []sliceParam{sliceParam{0, true}, sliceParam{3, true}, sliceParam{1, true}}) + assert.Nil(err) + assert.Equal(input[:3], result) +} + +func TestIsFalseJSONTypes(t *testing.T) { + assert := assert.New(t) + assert.True(isFalse(false)) + assert.True(isFalse("")) + var empty []interface{} + assert.True(isFalse(empty)) + m := make(map[string]interface{}) + assert.True(isFalse(m)) + assert.True(isFalse(nil)) + +} + +func TestIsFalseWithUserDefinedStructs(t *testing.T) { + assert := assert.New(t) + type nilStructType struct { + SliceOfPointers []*string + } + nilStruct := nilStructType{SliceOfPointers: nil} + assert.True(isFalse(nilStruct.SliceOfPointers)) + + // A user defined struct will never be false though, + // even if it's fields are the zero type. + assert.False(isFalse(nilStruct)) +} + +func TestIsFalseWithNilInterface(t *testing.T) { + assert := assert.New(t) + var a *int = nil + var nilInterface interface{} + nilInterface = a + assert.True(isFalse(nilInterface)) +} + +func TestIsFalseWithMapOfUserStructs(t *testing.T) { + assert := assert.New(t) + type foo struct { + Bar string + Baz string + } + m := make(map[int]foo) + assert.True(isFalse(m)) +} + +func TestObjsEqual(t *testing.T) { + assert := assert.New(t) + assert.True(objsEqual("foo", "foo")) + assert.True(objsEqual(20, 20)) + assert.True(objsEqual([]int{1, 2, 3}, []int{1, 2, 3})) + assert.True(objsEqual(nil, nil)) + assert.True(!objsEqual(nil, "foo")) + assert.True(objsEqual([]int{}, []int{})) + assert.True(!objsEqual([]int{}, nil)) +} diff --git a/Gruntfile.js b/Gruntfile.js index 43cb1f09bd13b..70defdeaf6dba 100644 --- a/Gruntfile.js +++ b/Gruntfile.js @@ -6,6 +6,7 @@ module.exports = function (grunt) { pkg: grunt.file.readJSON('package.json'), baseDir: '.', srcDir: 'public', + genDir: 'public_gen', destDir: 'dist', tempDir: 'tmp', arch: os.arch(), diff --git a/README.md b/README.md index 0a7db31833849..3886c32a971f0 100644 --- a/README.md +++ b/README.md @@ -6,16 +6,14 @@ [Email](mailto:contact@grafana.org) Grafana is an open source, feature rich metrics dashboard and graph editor for -Graphite, InfluxDB & OpenTSDB. +Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB. ![](http://grafana.org/assets/img/start_page_bg.png) -## Grafana 2.0 -The develop branch has now been merged with master. For 1.9 users this is a big change as Grafana is no longer -a standalone frontend only web application. Grafana 2.0 comes with a backend. 
-- [Install instructions](http://docs.grafana.org/v2.0/installation/) -- [Migrating from 1.x to 2.x](http://docs.grafana.org/v2.0/installation/migrating_to2/) -- [What's New in Grafana 2.0](http://docs.grafana.org/v2.0/guides/whats-new-in-v2/) +- [Install instructions](http://docs.grafana.org/installation/) +- [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/) +- [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/) +- [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/) ## Features ### Graphite Target Editor @@ -30,7 +28,7 @@ a standalone frontend only web application. Grafana 2.0 comes with a backend. - Click and drag to zoom - Multiple Y-axis, logarithmic scales - Bars, Lines, Points -- Smart Y-axis formating +- Smart Y-axis formatting - Series toggles & color selector - Legend values, and formatting options - Grid thresholds, axis labels @@ -47,6 +45,9 @@ a standalone frontend only web application. Grafana 2.0 comes with a backend. - [Time range controls](http://docs.grafana.org/reference/timerange/) - [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/) +### Elasticsearch +- Feature rich query editor UI + ### InfluxDB - Use InfluxDB as a metric data source, annotation source - Query editor with series and column typeahead, easy group by and function selection @@ -69,12 +70,12 @@ If you have any problems please read the [troubleshooting guide](http://docs.gra Be sure to read the [getting started guide](http://docs.grafana.org/guides/gettingstarted/) and the other feature guides. ## Run from master -If you want to build a package your self, or contribute. Here is a guide for how to do that. You can always find +If you want to build a package yourself, or contribute. Here is a guide for how to do that. You can always find the latest master builds [here](http://grafana.org/download/builds) ### Dependencies -- Go 1.4 +- Go 1.5 - NodeJS ### Get Code @@ -84,11 +85,12 @@ go get github.com/grafana/grafana ``` ### Building the backend +Replace X.Y.Z by actual version number. ``` cd $GOPATH/src/github.com/grafana/grafana go run build.go setup (only needed once to install godep) godep restore (will pull down all golang lib dependencies in your current GOPATH) -go build . +go run build.go build ``` ### Building frontend assets @@ -111,7 +113,7 @@ bra run ### Running ``` -./grafana +./bin/grafana-server ``` Open grafana in your browser (default http://localhost:3000) and login with admin user (default user/pass = admin/admin). @@ -127,13 +129,14 @@ You only need to add the options you want to override. Config files are applied ## Create a pull request Before or after you create a pull request, sign the [contributor license agreement](http://grafana.org/docs/contributing/cla.html). + ## Contribute If you have any idea for an improvement or found a bug do not hesitate to open an issue. And if you have time clone this repo and submit a pull request and help me make Grafana the kickass metrics & devops dashboard we all dream about! 
Before creating a pull request be sure that "grunt test" runs without any style or unit test errors, also -please [sign the CLA](http://grafana.org/docs/contributing/cla.html) +please [sign the CLA](http://docs.grafana.org/project/cla/) ## License diff --git a/bower.json b/bower.json index 3607de4d8f046..88cb0b3df4afb 100644 --- a/bower.json +++ b/bower.json @@ -14,11 +14,11 @@ ], "dependencies": { "jquery": "~2.1.4", - "angular": "~1.4.0", - "angular-route": "~1.4.0", - "angular-mocks": "~1.4.0", - "angular-sanitize": "~1.4.0", - "angular-native-dragdrop": "~1.1.0", + "angular": "~1.4.3", + "angular-route": "~1.4.3", + "angular-mocks": "~1.4.3", + "angular-sanitize": "~1.4.3", + "angular-native-dragdrop": "~1.1.1", "angular-bindonce": "~0.3.3", "requirejs": "~2.1.18", "requirejs-text": "~2.0.14" diff --git a/build.go b/build.go index 798faadfca098..4a345c5fc730a 100644 --- a/build.go +++ b/build.go @@ -89,8 +89,13 @@ func main() { } func makeLatestDistCopies() { + rpmIteration := "-1" + if linuxPackageIteration != "" { + rpmIteration = "-" + linuxPackageIteration + } + runError("cp", "dist/grafana_"+version+"_amd64.deb", "dist/grafana_latest_amd64.deb") - runError("cp", "dist/grafana-"+strings.Replace(version, "-", "_", 5)+"-1.x86_64.rpm", "dist/grafana-latest-1.x86_64.rpm") + runError("cp", "dist/grafana-"+linuxPackageVersion+rpmIteration+".x86_64.rpm", "dist/grafana-latest-1.x86_64.rpm") runError("cp", "dist/grafana-"+version+".linux-x64.tar.gz", "dist/grafana-latest.linux-x64.tar.gz") } @@ -221,6 +226,7 @@ func createPackage(options linuxPackageOptions) { "--license", "Apache 2.0", "--maintainer", "contact@grafana.org", "--config-files", options.configFilePath, + "--config-files", options.ldapFilePath, "--config-files", options.initdScriptFilePath, "--config-files", options.etcDefaultFilePath, "--config-files", options.systemdServiceFilePath, @@ -322,9 +328,9 @@ func build(pkg string, tags []string) { func ldflags() string { var b bytes.Buffer b.WriteString("-w") - b.WriteString(fmt.Sprintf(" -X main.version '%s'", version)) - b.WriteString(fmt.Sprintf(" -X main.commit '%s'", getGitSha())) - b.WriteString(fmt.Sprintf(" -X main.buildstamp %d", buildStamp())) + b.WriteString(fmt.Sprintf(" -X main.version=%s", version)) + b.WriteString(fmt.Sprintf(" -X main.commit=%s", getGitSha())) + b.WriteString(fmt.Sprintf(" -X main.buildstamp=%d", buildStamp())) return b.String() } diff --git a/circle.yml b/circle.yml index 32ace393c8b49..bb22fcb0a6ec5 100644 --- a/circle.yml +++ b/circle.yml @@ -1,4 +1,6 @@ machine: + node: + version: 4.0 environment: GOPATH: "/home/ubuntu/.go_workspace" ORG_PATH: "github.com/grafana" diff --git a/conf/defaults.ini b/conf/defaults.ini index e8527a488f6d3..8b2b34f3fbc33 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -3,6 +3,7 @@ # Do not modify this file in grafana installs # +# possible values : production, development app_mode = production #################################### Paths #################################### @@ -66,7 +67,7 @@ path = grafana.db #################################### Session #################################### [session] -# Either "memory", "file", "redis", "mysql", "postgresql", default is "file" +# Either "memory", "file", "redis", "mysql", "postgres", default is "file" provider = file # Provider config options @@ -86,6 +87,7 @@ cookie_secure = false # Session life time, default is 86400 session_life_time = 86400 +gc_interval_time = 86400 #################################### Analytics #################################### 
[analytics] @@ -98,6 +100,9 @@ reporting_enabled = true # Google Analytics universal tracking code, only enabled if you specify an id here google_analytics_ua_id = +# Google Tag Manager ID, only enabled if you specify an id here +google_tag_manager_id = + #################################### Security #################################### [security] # default admin user, created on startup @@ -117,6 +122,9 @@ cookie_remember_name = grafana_remember # disable gravatar profile images disable_gravatar = false +# data source proxy whitelist (ip_or_domain:port seperated by spaces) +data_source_proxy_whitelist = + #################################### Users #################################### [users] # disable user signup / registration @@ -131,6 +139,9 @@ auto_assign_org = true # Default role new users will be automatically assigned (if auto_assign_org above is set to true) auto_assign_org_role = Viewer +# Require email validation before sign up completes +verify_email_enabled = false + #################################### Anonymous Auth ########################## [auth.anonymous] # enable anonymous access @@ -153,7 +164,6 @@ auth_url = https://github.com/login/oauth/authorize token_url = https://github.com/login/oauth/access_token api_url = https://api.github.com/user team_ids = -allowed_domains = allowed_organizations = #################################### Google Auth ########################## @@ -214,6 +224,8 @@ level = Info # For "console" mode only [log.console] level = +# Set formatting to "false" to disable color formatting of console logs +formatting = false # For "file" mode only [log.file] @@ -244,4 +256,37 @@ exchange = grafana_events enabled = false path = /var/lib/grafana/dashboards +#################################### Usage Quotas ########################## +[quota] +enabled = false + +#### set quotas to -1 to make unlimited. #### +# limit number of users per Org. +org_user = 10 + +# limit number of dashboards per Org. +org_dashboard = 100 + +# limit number of data_sources per Org. +org_data_source = 10 + +# limit number of api_keys per Org. +org_api_key = 10 + +# limit number of orgs a user can create. +user_org = 10 + +# Global limit of users. +global_user = -1 + +# global limit of orgs. +global_org = -1 + +# global limit of dashboards +global_dashboard = -1 + +# global limit of api_keys +global_api_key = -1 +# global limit on number of logged in users. +global_session = -1 diff --git a/conf/ldap.toml b/conf/ldap.toml index 7deb090f9d5eb..aa8a9679d6839 100644 --- a/conf/ldap.toml +++ b/conf/ldap.toml @@ -2,7 +2,7 @@ verbose_logging = false [[servers]] -# Ldap server host +# Ldap server host (specify multiple hosts space separated) host = "127.0.0.1" # Default port is 389 or 636 if use_ssl = true port = 389 @@ -10,17 +10,29 @@ port = 389 use_ssl = false # set to true if you want to skip ssl cert validation ssl_skip_verify = false +# set to the path to your root CA certificate or leave unset to use system defaults +# root_ca_cert = /path/to/certificate.crt # Search user bind dn bind_dn = "cn=admin,dc=grafana,dc=org" # Search user bind password bind_password = 'grafana' -# Search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" +# User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)" search_filter = "(cn=%s)" + # An array of base dns to search through search_base_dns = ["dc=grafana,dc=org"] +# In POSIX LDAP schemas, without memberOf attribute a secondary query must be made for groups. 
+# This is done by enabling group_search_filter below. You must also set member_of= "cn" +# in [servers.attributes] below. + +## Group search filter, to retrieve the groups of which the user is a member (only set if memberOf attribute is not available) +# group_search_filter = "(&(objectClass=posixGroup)(memberUid=%s))" +## An array of the base DNs to search through for groups. Typically uses ou=groups +# group_search_base_dns = ["ou=groups,dc=grafana,dc=org"] + # Specify names of the ldap attributes your ldap uses [servers.attributes] name = "givenName" @@ -36,7 +48,7 @@ org_role = "Admin" # The Grafana organization database id, optional, if left out the default org (id 1) will be used # org_id = 1 -[[server.group_mappings]] +[[servers.group_mappings]] group_dn = "cn=users,dc=grafana,dc=org" org_role = "Editor" diff --git a/conf/sample.ini b/conf/sample.ini index 5baf097ea20a0..e8550c66c1aa1 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -3,6 +3,7 @@ # Everything has defaults so you only need to uncomment things you want to # change +# possible values : production, development ; app_mode = production #################################### Paths #################################### @@ -66,7 +67,7 @@ #################################### Session #################################### [session] -# Either "memory", "file", "redis", "mysql", "postgresql", default is "file" +# Either "memory", "file", "redis", "mysql", "postgres", default is "file" ;provider = file # Provider config options @@ -116,6 +117,9 @@ # disable gravatar profile images ;disable_gravatar = false +# data source proxy whitelist (ip_or_domain:port seperated by spaces) +;data_source_proxy_whitelist = + #################################### Users #################################### [users] # disable user signup / registration @@ -152,7 +156,6 @@ ;token_url = https://github.com/login/oauth/access_token ;api_url = https://api.github.com/user ;team_ids = -;allowed_domains = ;allowed_organizations = #################################### Google Auth ########################## diff --git a/docker/blocks/postgres/fig b/docker/blocks/postgres/fig new file mode 100644 index 0000000000000..c7458cbe4008f --- /dev/null +++ b/docker/blocks/postgres/fig @@ -0,0 +1,7 @@ +postgrestest: + image: postgres:latest + environment: + POSTGRES_USER: grafana + POSTGRES_PASSWORD: password + ports: + - "5432:5432" diff --git a/docker/blocks/prometheus/Dockerfile b/docker/blocks/prometheus/Dockerfile new file mode 100644 index 0000000000000..0e07679ddd57f --- /dev/null +++ b/docker/blocks/prometheus/Dockerfile @@ -0,0 +1,2 @@ +FROM prom/prometheus +ADD prometheus.yml /etc/prometheus/ diff --git a/docker/blocks/prometheus/fig b/docker/blocks/prometheus/fig new file mode 100644 index 0000000000000..0880902c9fdd9 --- /dev/null +++ b/docker/blocks/prometheus/fig @@ -0,0 +1,6 @@ +prometheus: + build: blocks/prometheus + ports: + - "9090:9090" + volumes: + - /var/docker/prometheus:/prometheus-data diff --git a/docker/blocks/prometheus/prometheus.yml b/docker/blocks/prometheus/prometheus.yml new file mode 100644 index 0000000000000..b0fc2a919cd15 --- /dev/null +++ b/docker/blocks/prometheus/prometheus.yml @@ -0,0 +1,26 @@ +# my global config +global: + scrape_interval: 10s # By default, scrape targets every 15 seconds. + evaluation_interval: 10s # By default, scrape targets every 15 seconds. + # scrape_timeout is set to the global default (10s). + +# Load and evaluate rules in this file every 'evaluation_interval' seconds. 
+rule_files: + # - "first.rules" + # - "second.rules" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 10s + scrape_timeout: 10s + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + target_groups: + - targets: ['localhost:9090', '172.17.42.1:9091'] diff --git a/docker/production/README.md b/docker/production/README.md index f6da10e62f6c3..9b0e23baf756d 100644 --- a/docker/production/README.md +++ b/docker/production/README.md @@ -2,7 +2,7 @@ # Grafana docker image This container currently only contains the in development alpha of Grafana 2.0 (ie non production use). The -`#develop` tag is constantly updated as we make progress torwards a beta release. +`#develop` tag is constantly updated as we make progress towards a beta release. ## Running your Grafana image @@ -17,7 +17,7 @@ Try it out, default admin user is admin/admin. ## Configuring your Grafana container -All options defined in conf/grafana.ini can be overriden using environment variables, for example: +All options defined in conf/grafana.ini can be overridden using environment variables, for example: ``` diff --git a/docs/Makefile b/docs/Makefile index fcb1708f9166f..9fd8cd6399f26 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -44,7 +44,7 @@ docs-test: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh docs-build: - git fetch https://github.com/grafana/grafana.git docs-1.x && git diff --name-status FETCH_HEAD...HEAD -- . > changed-files + git fetch https://github.com/grafana/grafana.git docs-2.1 && git diff --name-status FETCH_HEAD...HEAD -- . > changed-files echo "$(GIT_BRANCH)" > GIT_BRANCH echo "$(GITCOMMIT)" > GITCOMMIT docker build -t "$(DOCKER_DOCS_IMAGE)" . diff --git a/docs/README.md b/docs/README.md index c67ce97b2dc0b..36c636fcc72d7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -15,4 +15,4 @@ $ cd docs $ make docs ``` -Open [localhost:8100](http://localhost:8180) to view the docs. +Open [localhost:8180](http://localhost:8180) to view the docs. 
diff --git a/docs/VERSION b/docs/VERSION index 7ec1d6db40877..437459cd94c9f 100644 --- a/docs/VERSION +++ b/docs/VERSION @@ -1 +1 @@ -2.1.0 +2.5.0 diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index c58a13f48aa8e..f965e34c5bd9a 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -25,46 +25,56 @@ google_analytics: ['UA-47280256-1', 'grafana.org'] pages: # Introduction: -- ['index.md', 'About', 'Grafana'] +- ['index.md', 'Project', 'About Grafana'] +- ['project/cla.md', 'Project', 'Contributor License Agreement'] + - ['installation/index.md', 'Installation', 'Installation'] - ['installation/debian.md', 'Installation', 'Installing on Debian / Ubuntu'] - ['installation/rpm.md', 'Installation', 'Installing on RPM-based Linux'] - ['installation/mac.md', 'Installation', 'Installing on Mac OS X'] - ['installation/windows.md', 'Installation', 'Installing on Windows'] - ['installation/docker.md', 'Installation', 'Installing on Docker'] +- ['project/building_from_source.md', 'Installation', 'Building from Source'] - ['installation/configuration.md', 'Installation', 'Configuration'] - ['installation/ldap.md', 'Installation', 'LDAP Integration'] - ['installation/provisioning.md', 'Installation', 'Provisioning'] -- ['installation/performance.md', 'Installation', 'Performance tips'] +- ['installation/performance.md', 'Installation', 'Performance Tips'] - ['installation/troubleshooting.md', 'Installation', 'Troubleshooting'] - ['installation/migrating_to2.md', 'Installation', 'Migrating from v1.x to v2.x'] -- ['guides/gettingstarted.md', 'User Guides', 'Getting started'] -- ['guides/whats-new-in-v2.md', 'User Guides', "What's New in Grafana v2.0"] +- ['guides/basic_concepts.md', 'User Guides', 'Basic Concepts'] +- ['guides/gettingstarted.md', 'User Guides', 'Getting Started'] +- ['guides/whats-new-in-v2-5.md', 'User Guides', "What's New in Grafana v2.5"] - ['guides/whats-new-in-v2-1.md', 'User Guides', "What's New in Grafana v2.1"] +- ['guides/whats-new-in-v2.md', 'User Guides', "What's New in Grafana v2.0"] - ['guides/screencasts.md', 'User Guides', 'Screencasts'] - ['reference/graph.md', 'Reference', 'Graph Panel'] - ['reference/singlestat.md', 'Reference', 'Singlestat Panel'] -- ['reference/dashlist.md', 'Reference', 'Dashboard list Panel'] +- ['reference/dashlist.md', 'Reference', 'Dashboard List Panel'] - ['reference/sharing.md', 'Reference', 'Sharing'] - ['reference/annotations.md', 'Reference', 'Annotations'] -- ['reference/timerange.md', 'Reference', 'Time range controls'] +- ['reference/timerange.md', 'Reference', 'Time Range Controls'] - ['reference/search.md', 'Reference', 'Dashboard Search'] -- ['reference/templating.md', 'Reference', 'Templated dashboards'] -- ['reference/scripting.md', 'Reference', 'Scripted dashboards'] +- ['reference/templating.md', 'Reference', 'Templated Dashboards'] +- ['reference/scripting.md', 'Reference', 'Scripted Dashboards'] - ['reference/playlist.md', 'Reference', 'Playlist'] - ['reference/export_import.md', 'Reference', 'Import & Export'] - ['reference/admin.md', 'Reference', 'Administration'] - ['reference/http_api.md', 'Reference', 'HTTP API'] +- ['reference/keyboard_shortcuts.md', 'Reference', 'Keyboard Shortcuts'] +- ['datasources/overview.md', 'Data Sources', 'Overview'] - ['datasources/graphite.md', 'Data Sources', 'Graphite'] +- ['datasources/elasticsearch.md', 'Data Sources', 'Elasticsearch'] +- ['datasources/cloudwatch.md', 'Data Sources', 'CloudWatch'] - ['datasources/influxdb.md', 'Data Sources', 'InfluxDB'] - ['datasources/opentsdb.md', 'Data 
Sources', 'OpenTSDB'] - ['datasources/kairosdb.md', 'Data Sources', 'KairosDB'] +- ['datasources/prometheus.md', 'Data Sources', 'Prometheus'] -- ['project/building_from_source.md', 'Project', 'Building from source'] -- ['project/cla.md', 'Project', 'Contributor License Agreement'] +- ['tutorials/index.md', 'Tutorials', 'Tutorials'] +- ['tutorials/hubot_howto.md', 'Tutorials', 'How To integrate Hubot and Grafana'] - ['jsearch.md', '**HIDDEN**'] diff --git a/docs/sources/datasources/cloudwatch.md b/docs/sources/datasources/cloudwatch.md new file mode 100644 index 0000000000000..012da9d843a63 --- /dev/null +++ b/docs/sources/datasources/cloudwatch.md @@ -0,0 +1,84 @@ +---- +page_title: Cloudwatch +page_description: Cloudwatch grafana datasource documentation +page_keywords: Cloudwatch, grafana, documentation, datasource, docs +--- + +# CloudWatch + +Grafana ships with built in support for CloudWatch. You just have to add it as a data source and you will +be ready to build dashboards for you CloudWatch metrics. + +## Adding the data source +![](/img/cloudwatch/cloudwatch_add.png) + +1. Open the side menu by clicking the the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. + + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `CloudWatch` from the dropdown. + +Name | Description +------------ | ------------- +Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards. +Default | Default data source means that it will be pre-selected for new panels. +Credentials profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1 +Default Region | Used in query editor to set region (can be changed on per query basis) + +## Authentication + +### IAM Roles + +Currently all access to CloudWatch is done server side by the Grafana backend using the official AWS SDK. If you grafana +server is running on AWS you can use IAM Roles and authentication will be handled automatically. + +Checkout AWS docs on [IAM Roles]](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +### AWS credentials file + +Create a file at `~/.aws/credentials`. That is the `HOME` path for user running grafana-server. + +Example content: + + [default] + aws_access_key_id = asdsadasdasdasd + aws_secret_access_key = dasdasdsadasdasdasdsa + region = us-west-2 + + +## Metric Query Editor + +![](/img/cloudwatch/query_editor.png) + +You need to specify a namespace, metric, at least one stat, and at least one dimension. + +## Templated queries +CloudWatch Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `region`, `namespaces`, `metric names` and `dimension keys/values` on the CloudWatch. + +Name | Description +------- | -------- +`regions()` | Returns a list of regions AWS provides their service. +`namespaces()` | Returns a list of namespaces CloudWatch support. +`metrics(namespace)` | Returns a list of metrics in the namespace. +`dimension_keys(namespace)` | Returns a list of dimension keys in the namespace. +`dimension_values(region, namespace, metric)` | Returns a list of dimension values matching the specified `region`, `namespace` and `metric`. 
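All CloudWatch access is performed server side by the Grafana backend through the official AWS SDK, as noted in the Authentication section above. As a rough illustration of what one such request looks like, here is a minimal Go sketch using aws-sdk-go with the shared credentials file; the profile, region, namespace, metric and instance id are placeholders, and this is not Grafana's actual data source code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	// Use the "default" profile from ~/.aws/credentials, as described above.
	sess := session.New(&aws.Config{
		Region:      aws.String("us-west-2"), // placeholder region
		Credentials: credentials.NewSharedCredentials("", "default"),
	})
	svc := cloudwatch.New(sess)

	// One GetMetricStatistics request, roughly what a single panel query issues.
	out, err := svc.GetMetricStatistics(&cloudwatch.GetMetricStatisticsInput{
		Namespace:  aws.String("AWS/EC2"),
		MetricName: aws.String("CPUUtilization"),
		Dimensions: []*cloudwatch.Dimension{
			{Name: aws.String("InstanceId"), Value: aws.String("i-12345678")}, // placeholder
		},
		StartTime:  aws.Time(time.Now().Add(-1 * time.Hour)),
		EndTime:    aws.Time(time.Now()),
		Period:     aws.Int64(300),
		Statistics: []*string{aws.String("Average")},
	})
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println(out.Datapoints)
}
```

Each call of this kind corresponds to one of the GetMetricStatistics requests that the Cost section below refers to.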
+ +For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html). + +If you want to filter dimension values by other dimension key/value pair, you can specify optional parameter like this. +```sql +dimension_values(region, namespace, metric, dim_key1=dim_val1,dim_key2=dim_val2,...) +``` + +![](/img/v2/cloudwatch_templating.png) + +## Cost + +It's worth to mention that Amazon will charge you for CloudWatch API usage. CloudWatch costs +$0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will +issue a GetMetricStatistics request and every time you pick a dimension in the query editor +Grafana will issue a ListMetrics request. + + diff --git a/docs/sources/datasources/elasticsearch.md b/docs/sources/datasources/elasticsearch.md new file mode 100644 index 0000000000000..76b0040ecd345 --- /dev/null +++ b/docs/sources/datasources/elasticsearch.md @@ -0,0 +1,58 @@ +---- +page_title: Elasticsearch +page_description: Elasticsearch grafana datasource documentation +page_keywords: Elasticsearch, grafana, kibana, documentation, datasource, docs +--- + +# Elasticsearch + +Grafana ships with advanced support for Elasticsearch. You can do many types of +simple or complex elasticsearch queries to visualize logs or metrics stored in elasticsearch. You can +also annotate your graphs with log events stored in elasticsearch. + +## Adding the data source +![](/img/v2/add_Graphite.jpg) + +1. Open the side menu by clicking the the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. + + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `Elasticsearch` from the dropdown. + +Name | Description +------------ | ------------- +Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards. +Default | Default data source means that it will be pre-selected for new panels. +Url | The http protocol, ip and port of you elasticsearch server. +Access | Proxy = access via Grafana backend, Direct = access directory from browser. + +Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser. + +Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source. + +### Direct access +If you select direct access you must update your Elasticsearch configuration to allow other domains to access +Elasticsearch from the browser. You do this by specifying these to options in your **elasticsearch.yml** config file. + + http.cors.enabled: true + http.cors.allow-origin: "*" + +### Index settings + +![](/img/elasticsearch/elasticsearch_ds_details.png) + +Here you can specify a default for the `time field` and specify the name of your elasticsearch index. You can use +a time pattern for the index name or a wildcard. 
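The daily index option above is easiest to see with a concrete expansion. The sketch below assumes a `prefix` plus `YYYY.MM.DD` style pattern and made-up dates, and shows how one index name per day would be derived for a dashboard's time range; it illustrates the idea only and is not Grafana's index resolution code.

```go
package main

import (
	"fmt"
	"time"
)

// dailyIndices expands a daily pattern such as "logstash-" + YYYY.MM.DD
// into one concrete index name per day in the range [from, to].
func dailyIndices(prefix string, from, to time.Time) []string {
	var indices []string
	for d := from.Truncate(24 * time.Hour); !d.After(to); d = d.Add(24 * time.Hour) {
		indices = append(indices, prefix+d.Format("2006.01.02"))
	}
	return indices
}

func main() {
	to := time.Date(2015, 10, 28, 12, 0, 0, 0, time.UTC)
	from := to.Add(-48 * time.Hour)
	fmt.Println(dailyIndices("logstash-", from, to))
	// [logstash-2015.10.26 logstash-2015.10.27 logstash-2015.10.28]
}
```

A plain wildcard index such as `logstash-*` avoids the expansion entirely, but every matching index is queried.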
+ +## Metric Query editor + +![](/img/elasticsearch/query_editor.png) + +The Elasticsearch query editor allows you to select multiple metrics and group by multiple terms or filters. Use the plus and minus icons to the right to add / remove +metrics or group bys. Some metrics and group by have options, click the option text to expand the the row to view and edit metric or group by options. + +## Annotations +TODO + diff --git a/docs/sources/datasources/graphite.md b/docs/sources/datasources/graphite.md index d41a987514c94..c4cae0b0c653e 100644 --- a/docs/sources/datasources/graphite.md +++ b/docs/sources/datasources/graphite.md @@ -10,14 +10,16 @@ Grafana has an advanced Graphite query editor that lets you quickly navigate the change function parameters and much more. The editor can handle all types of graphite queries. It can even handle complex nested queries through the use of query references. -## Adding the data source to Grafana -Open the side menu by clicking the the Grafana icon in the top header. In the side menu under the `Dashboards` link you -should find a link named `Data Sources`. If this link is missing in the side menu it means that your current -user does not have the `Admin` role for the current organization. +## Adding the data source +![](/img/v2/add_Graphite.jpg) -![](/img/v2/add_datasource_graphite.png) +1. Open the side menu by clicking the the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. -Now click the `Add new` link in the top header. + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `Graphite` from the dropdown. Name | Description ------------ | ------------- @@ -26,17 +28,21 @@ Default | Default data source means that it will be pre-selected for new panels. Url | The http protocol, ip and port of you graphite-web or graphite-api install. Access | Proxy = access via Grafana backend, Direct = access directory from browser. + +Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser. + +Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source. + + ## Metric editor ### Navigate metric segments - Click the ``Select metric`` link to start navigating the metric space. One you start you can continue using the mouse or keyboard arrow keys. You can select a wildcard and still continue. ![](/img/animated_gifs/graphite_query1.gif) ### Functions - Click the plus icon to the right to add a function. You can search for the function or select it from the menu. Once a function is selected it will be added and your focus will be in the text box of the first parameter. To later change a parameter just click on it and it will turn into a text box. To delete a function click the function name followed @@ -70,3 +76,6 @@ You can also create nested variables that use other variables in their definitio ![](/img/v2/templated_variable_parameter.png) + +## Query Reference +You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). 
If you add a second query to graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries. diff --git a/docs/sources/datasources/influxdb.md b/docs/sources/datasources/influxdb.md index 05e627967ebea..e57d5a63fbc00 100644 --- a/docs/sources/datasources/influxdb.md +++ b/docs/sources/datasources/influxdb.md @@ -10,27 +10,33 @@ There are currently two separate datasources for InfluxDB in Grafana: InfluxDB 0 The API and capabilities of InfluxDB 0.9.x are completely different from InfluxDB 0.8.x which is why Grafana handles them as different data sources. -## Adding the data source to Grafana -Open the side menu by clicking the the Grafana icon in the top header. In the side menu under the `Dashboards` link you -should find a link named `Data Sources`. If this link is missing in the side menu it means that your current -user does not have the `Admin` role for the current organization. +InfluxDB 0.9 is rapidly evolving and we continue to track its API. InfluxDB 0.8 is no longer maintained by InfluxDB Inc, but we provide support as a convenience to existing users. -![](/img/v2/add_datasource_influxdb.png) +## Adding the data source +![](/img/v2/add_Influx.jpg) -Now click the `Add new` link in the top header. +1. Open the side menu by clicking the the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. + + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `InfluxDB 0.9.x` or `InfluxDB 0.8.x` from the dropdown. Name | Description ------------ | ------------- Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards. Default | Default data source means that it will be pre-selected for new panels. Url | The http protocol, ip and port of you influxdb api (influxdb api port is by default 8086) -Access | Proxy = access via Grafana backend, Direct = access directory from browser. +Access | Proxy = access via Grafana backend, Direct = access directly from browser. Database | Name of your influxdb database User | Name of your database user Password | Database user's password -> *Note* When using Proxy access mode the InfluxDB database, user and password will be hidden from the browser/frontend. When -> using direct access mode all users will be able to see the database user & password. + > Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser. + + > Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source. + ## InfluxDB 0.9.x @@ -45,7 +51,7 @@ the tag key and select `--remove tag filter--`. ### Regex matching You can type in regex patterns for metric names or tag filter values, be sure to wrap the regex pattern in forward slashes (`/`). Grafana -will automaticallay adjust the filter tag condition to use the InfluxDB regex match condition operator (`=~`). +will automatically adjust the filter tag condition to use the InfluxDB regex match condition operator (`=~`). 
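The proxy access mode described in the note above boils down to a server side reverse proxy: the browser only ever talks to the Grafana backend, which forwards the query to InfluxDB, so the database user and password never have to reach the client. The following Go snippet is a bare illustration of that idea with an assumed local InfluxDB URL and proxy route; it is not Grafana's actual proxy implementation.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Assumed data source URL; in Grafana this comes from the data source settings.
	target, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}

	// Forward everything under the proxy route to InfluxDB, stripping the prefix,
	// so credentials and CORS concerns stay on the server side.
	proxy := httputil.NewSingleHostReverseProxy(target)
	http.Handle("/api/datasources/proxy/1/", http.StripPrefix("/api/datasources/proxy/1", proxy))

	log.Fatal(http.ListenAndServe(":3000", nil))
}
```

With direct access this hop is skipped and the browser queries InfluxDB itself, so the database credentials must be available to the client.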
### Editor group by To group by a tag click the plus icon after the `GROUP BY ($interval)` text. Pick a tag from the dropdown that appears. @@ -62,6 +68,7 @@ You can switch to raw query mode by pressing the pen icon. - $m = replaced with measurement name - $measurement = replaced with measurement name +- $col = replaced with column name - $tag_hostname = replaced with the value of the hostname tag - You can also use [[tag_hostname]] pattern replacement syntax diff --git a/docs/sources/datasources/kairosdb.md b/docs/sources/datasources/kairosdb.md index f0d52b915480e..fc9b56825152a 100644 --- a/docs/sources/datasources/kairosdb.md +++ b/docs/sources/datasources/kairosdb.md @@ -5,15 +5,20 @@ page_keywords: grafana, kairosdb, documentation --- # KairosDB Guide +Grafana v2.1 brings initial support for KairosDB Datasources. While the process of adding the datasource is similar to adding a Graphite or OpenTSDB datasource type, Kairos DB does have a few different options for building queries. ## Adding the data source to Grafana -Open the side menu by clicking the the Grafana icon in the top header. In the side menu under the `Dashboards` link you -should find a link named `Data Sources`. If this link is missing in the side menu it means that your current -user does not have the `Admin` role for the current organization. +![](/img/v2/add_KairosDB.jpg) + +1. Open the side menu by clicking the the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. + + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `KairosDB` from the dropdown. - -Now click the `Add new` link in the top header. Name | Description ------------ | ------------- @@ -25,23 +30,23 @@ Access | Proxy = access via Grafana backend, Direct = access directory from brow ## Query editor Open a graph in edit mode by click the title. - - -For details on KairosDB metric queries checkout the offical. +![](/img/v2/kairos_query_editor.png) +For details on KairosDB metric queries checkout the official. - [Query Metrics - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/QueryMetrics.html). ## Templated queries KairosDB Datasource Plugin provides following functions in `Variables values query` field in Templating Editor to query `metric names`, `tag names`, and `tag values` to kairosdb server. Name | Description ----- | ---- -`metrics(query)` | Returns a list of metric names. If nothing is given, returns a list of all metric names. -`tag_names(query)` | Returns a list of tag names. If nothing is given, returns a list of all tag names. -`tag_values(query)` | Returns a list of tag values. If nothing is given, returns a list of all tag values. +| ------- | --------| +`metrics(query)` | Returns a list of metric names matching `query`. If nothing is given, returns a list of all metric names. +`tag_names(query)` | Returns a list of tag names matching `query`. If nothing is given, returns a list of all tag names. +`tag_values(metric,tag)` | Returns a list of values for `tag` from the given `metric`. For details of `metric names`, `tag names`, and `tag values`, please refer to the KairosDB documentations. 
- [List Metric Names - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/ListMetricNames.html) - [List Tag Names - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/ListTagNames.html) - [List Tag Values - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/ListTagValues.html) +- [Query Metrics - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/QueryMetrics.html). diff --git a/docs/sources/datasources/opentsdb.md b/docs/sources/datasources/opentsdb.md index e9110418f4cc4..43fcda643ee13 100644 --- a/docs/sources/datasources/opentsdb.md +++ b/docs/sources/datasources/opentsdb.md @@ -5,15 +5,17 @@ page_keywords: grafana, opentsdb, documentation --- # OpenTSDB Guide +The newest release of Grafana adds additional functionality when using an OpenTSDB Data source. -## Adding the data source to Grafana -Open the side menu by clicking the the Grafana icon in the top header. In the side menu under the `Dashboards` link you -should find a link named `Data Sources`. If this link is missing in the side menu it means that your current -user does not have the `Admin` role for the current organization. +![](/img/v2/add_OpenTSDB.jpg) -![](/img/v2/add_datasource_opentsdb.png) +1. Open the side menu by clicking the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. -Now click the `Add new` link in the top header. + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `OpenTSDB` from the dropdown. Name | Description ------------ | ------------- @@ -28,20 +30,23 @@ Open a graph in edit mode by click the title. ![](/img/v2/opentsdb_query_editor.png) ### Auto complete suggestions -You should get auto complete suggestions for tags and tag values. If you do not you need to enable `tsd.core.meta.enable_realtime_ts` in -the OpentSDB server settings. This is required for the OpenTSDB `lookup` api to work. +As soon as you start typing metric names, tag names and tag values, you should see highlighted auto complete suggestions for them. + + > Note: This is required for the OpenTSDB `suggest` api to work. ## Templating queries +Grafana's OpenTSDB data source now supports template variable values queries. This means you can create template variables that fetch the values from OpenTSDB (for example metric names, tag names, or tag values). The query editor is also enhanced to limit tags by metric. When using OpenTSDB with a template variable of `query` type you can use following syntax for lookup. - metrics() // returns metric names + metrics(prefix) // returns metric names with specific prefix (can be empty) tag_names(cpu) // return tag names (i.e. keys) for a specific cpu metric tag_values(cpu, hostname) // return tag values for metric cpu and tag key hostname + suggest_tagk(prefix) // return tag names (i.e. keys) for all metrics with specific prefix (can be empty) + suggest_tagv(prefix) // return tag values for all metrics with specific prefix (can be empty) -For details on opentsdb metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html) - - - +If you do not see template variables being populated in the `Preview of values` section, you need to enable `tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings.
Also, to populate metadata of the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server. +> Note: This is required for the OpenTSDB `lookup` api to work. +For details on OpenTSDB metric queries, check out the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html) diff --git a/docs/sources/datasources/overview.md b/docs/sources/datasources/overview.md new file mode 100644 index 0000000000000..8fe6538dcd3ee --- /dev/null +++ b/docs/sources/datasources/overview.md @@ -0,0 +1,25 @@ +---- +page_title: Data Source Overview +page_description: Data Source Overview +page_keywords: grafana, graphite, influxDB, KairosDB, OpenTSDB, Prometheus, documentation +--- + +# Data Source Overview +Grafana supports many different storage backends for your time series data (Data Source). Each Data Source has a specific Query Editor that is customized for the features and capabilities that the particular Data Source exposes. + + +## Querying +The query language and capabilities of each Data Source are obviously very different. You can combine data from multiple Data Sources onto a single Dashboard, but each Panel is tied to a specific Data Source that belongs to a particular Organization. + +## Supported Data Sources +The following datasources are officially supported: + +* [Graphite](/datasources/graphite/) +* [Elasticsearch](/datasources/elasticsearch/) +* [CloudWatch](/datasources/cloudwatch/) +* [InfluxDB](/datasources/influxdb/) +* [OpenTSDB](/datasources/opentsdb/) +* [KairosDB](/datasources/kairosdb) +* [Prometheus](/datasources/prometheus) + +Grafana can query any Elasticsearch index for annotation events, but at this time, it's not supported for metric queries. Learn more about [annotations](/reference/annotations/#elasticsearch-annotations) diff --git a/docs/sources/datasources/plugin_api.md b/docs/sources/datasources/plugin_api.md new file mode 100644 index 0000000000000..cdcaca2946031 --- /dev/null +++ b/docs/sources/datasources/plugin_api.md @@ -0,0 +1,40 @@ +---- +page_title: Data source Plugin API +page_description: Data Source Plugin Description +page_keywords: grafana, data source, plugin, api, docs +--- + +# Data source plugin API + +All data sources in Grafana are implemented as plugins. + +## Breaking change in 2.2 + +In Grafana 2.2 a breaking change was introduced for how data source query editors +are structured, defined and loaded. This was in order to support mixing multiple data sources +in the same panel. + +In Grafana 2.2, the query editor is no longer defined using the partials section in +`plugin.json`, but defined via an angular directive named using a convention-based naming +scheme like `metricQueryEditor`. For example: + +Graphite defines a directive like this: + +```javascript +module.directive('metricQueryEditorGraphite', function() { + return {controller: 'GraphiteQueryCtrl', templateUrl: 'app/plugins/datasource/graphite/partials/query.editor.html'}; +}); +``` + +Even though the data source type name is spelled with a lowercase `g`, the directive uses a capital `G` in `Graphite` because +that is how angular directives need to be named in order to match an element with name ``. +You also specify the query controller here instead of in the query.editor.html partial like before. + +### query.editor.html + +This partial needs to be updated: remove the `ng-repeat`, as this is done in the outer partial now; the query.editor.html +should only render a single query.
Take a look at the Graphite or InfluxDB partials for `query.editor.html` for reference. +You should also add a `tight-form-item` with `{{target.refId}}`; all queries need to be assigned a letter (`refId`). +These query reference letters are going to be utilized in a later feature. + + diff --git a/docs/sources/datasources/prometheus.md b/docs/sources/datasources/prometheus.md new file mode 100644 index 0000000000000..61f6c0d66e8dd --- /dev/null +++ b/docs/sources/datasources/prometheus.md @@ -0,0 +1,65 @@ +---- +page_title: Prometheus query guide +page_description: Prometheus query guide +page_keywords: grafana, prometheus, metrics, query, documentation +--- + +# Prometheus +Grafana includes support for Prometheus Datasources. While the process of adding the datasource is similar to adding a Graphite or OpenTSDB datasource type, Prometheus does have a few different options for building queries. + +## Adding the data source to Grafana +![](/img/v2/add_Prometheus.png) + +1. Open the side menu by clicking the Grafana icon in the top header. +2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`. + + > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization. + +3. Click the `Add new` link in the top header. +4. Select `Prometheus` from the dropdown. + +Name | Description +------------ | ------------- +Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards. +Default | Default data source means that it will be pre-selected for new panels. +Url | The http protocol, ip and port of your Prometheus server (default port is usually 9090) +Access | Proxy = access via Grafana backend, Direct = access directly from browser. +Basic Auth | Enable basic authentication to the Prometheus datasource. +User | Name of your Prometheus user +Password | Password of your Prometheus user + + > Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross-Origin Resource Sharing) issues, as well as eliminate the need to expose the Data Source authentication details to the browser. + + > Direct access is still supported because in some cases it may be useful to access a Data Source directly depending on the use case and topology of Grafana, the user, and the Data Source. + +## Query editor +Open a graph in edit mode by clicking the title. + +![](/img/v2/prometheus_editor.png) + +For details on Prometheus metric queries, check out the Prometheus documentation: +- [Query Metrics - Prometheus documentation](http://prometheus.io/docs/querying/basics/). + +## Templated queries +Prometheus Datasource Plugin provides the following functions in `Variables values query` field in Templating Editor to query `metric names` and `label names` on the Prometheus server. + +Name | Description +------- | -------- +`label_values(label)` | Returns a list of label values for the `label` in every metric. +`label_values(metric, label)` | Returns a list of label values for the `label` in the specified metric. +`metrics(metric)` | Returns a list of metrics matching the specified `metric` regex. + +For details of `metric names`, `label names`, and `label values`, please refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+ +You can create a template variable in Grafana and have that variable filled with values from any Prometheus metric exploration query. +You can then use this variable in your Prometheus metric queries. + +For example you can have a variable that contains all values for label `hostname` if you specify a query like this +in the templating edit view. +```sql +label_values(hostname) +``` + +You can also use raw queries & regular expressions to extract anything you might need. + +![](/img/v2/prometheus_templating.png) diff --git a/docs/sources/guides/basic_concepts.md b/docs/sources/guides/basic_concepts.md new file mode 100644 index 0000000000000..9deda2ee97e3a --- /dev/null +++ b/docs/sources/guides/basic_concepts.md @@ -0,0 +1,96 @@ +---- +page_title: Graphite query guide +page_description: Graphite query guide +page_keywords: grafana, graphite, metrics, query, documentation +--- + +# Basic Concepts + +This document is a “bottom up” introduction to basic concepts in Grafana, and can be used as a starting point to get familiar with core features. + + +### ** Data Source ** +Grafana supports many different storage backends for your time series data (Data Source). Each Data Source has a specific Query Editor that is customized for the features and capabilities that the particular Data Source exposes. + +The following datasources are officially supported: [Graphite](/datasources/graphite/), [InfluxDB](/datasources/influxdb/), [OpenTSDB](/datasources/opentsdb/), and [KairosDB](/datasources/kairosdb) + +The query language and capabilities of each Data Source are obviously very different. You can combine data from multiple Data Sources onto a single Dashboard, but each Panel is tied to a specific Data Source that belongs to a particular Organization. + +### ** Organization ** +Grafana supports multiple organizations in order to support a wide variety of deployment models, including using a single Grafana instance to provide service to multiple potentially untrusted Organizations. + +In many cases, Grafana will be deployed with a single Organization. + +Each Organization can have one or more Data Sources. + +All Dashboards are owned by a particular Organization. + + > Note: It is important to remember that most metric databases to not provide any sort of per-user series authentication. Therefore, in Grafana, Data Sources and Dashboards are available to all Users in a particular Organization. + +For more details on the user model for Grafana, please refer to [Admin](/reference/admin/) + +### ** User ** +A User is a named account in Grafana. A user can belong to one or more Organizations, and can be assigned different levels of privileges through roles. + +Grafana supports a wide variety of internal and external ways for Users to authenticate themselves. These include from its own integrated database, from an external SQL server, or from an external LDAP server. + +For more details please refer to [User Auth](/reference/http_api/#users) + +### ** Row ** + +A Row is a logical divider within a Dashboard, and is used to group Panels together. + +Rows are always 12 “units” wide. These units are automatically scaled dependent on the horizontal resolution of your browser. You can control the relative width of Panels within a row by setting their own width. + +We utilize a unit abstraction so that Grafana looks great on all screens both small and huge. + + > Note: With MaxDataPoint functionality, Grafana can show you the perfect amount of datapoints no matter your resolution or time-range. 
+ +Utilize the [Repeating Row functionality](/reference/templating/#utilizing-template-variables-with-repeating-panels-and-repeating-rows) to dynamically create or remove entire Rows (that can be filled with Panels), based on the Template variables selected. + +Rows can be collapsed by clicking on the Row Title. If you save a Dashboard with a Row collapsed, it will save in that state and will not preload those graphs until the row is expanded. + +### ** Panel ** + +The Panel is the basic visualization building block in Grafana. Each Panel provides a Query Editor (dependent on the Data Source selected in the panel) that allows you to extract the perfect visualization to display on the Panel. + +There are a wide variety of styling and formatting options that each Panel exposes to allow you to create the perfect picture. + +Panels can be dragged and dropped and rearranged on the Dashboard. They can also be resized. + +There are currently four Panel types: [Graph](/reference/graph/), [Singlestat](/reference/singlestat/), [Dashlist](/reference/dashlist/), and [Text](/reference/text/). + +Panels like the [Graph](/reference/graph/) panel allow you to graph as many metrics and series as you want. Other panels like [Singlestat](/reference/singlestat/) require a reduction of a single query into a single number. [Dashlist](/reference/dashlist/) and [Text](/reference/text/) are special panels that do not connect to any Data Source. + +Panels can be made more dynamic by utilizing [Dashboard Templating](/reference/templating/) variable strings within the panel configuration (including queries to your Data Source configured via the Query Editor). + +Utilize the [Repeating Panel](/reference/templating/#utilizing-template-variables-with-repeating-panels-and-repeating-rows) functionality to dynamically create or remove Panels based on the [Templating Variables](/reference/templating/#utilizing-template-variables-with-repeating-panels-and-repeating-rows) selected. + +The time range on Panels is normally what is set in the [Dashboard time picker](/reference/timerange/) but this can be overridden by utilizing [Panel specific time overrides](/reference/timerange/#panel-time-override). + +Panels (or an entire Dashboard) can be [Shared](/reference/sharing/) easily in a variety of ways. You can send a link to someone who has a login to your Grafana. You can use the [Snapshot](/reference/sharing/#snapshots) feature to encode all the data currently being viewed into a static and interactive JSON document; it's so much better than emailing a screenshot! + + +### ** Query Editor ** + +The Query Editor exposes capabilities of your Data Source and allows you to query the metrics that it contains. + +Use the Query Editor to build one or more queries (for one or more series) in your time series database. The panel will instantly update allowing you to effectively explore your data in real time and build a perfect query for that particular Panel. + +You can utilize [Template variables](/reference/templating/) in the Query Editor within the queries themselves. This provides a powerful way to explore data dynamically based on the Templating variables selected on the Dashboard. + +Grafana allows you to reference queries in the Query Editor by the row that they’re on. If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries. + +### ** Dashboard ** + +The Dashboard is where it all comes together.
Dashboards can be thought of as a set of one or more Panels organized and arranged into one or more Rows. + +The time period for the Dashboard can be controlled by the [Dashboard time picker](/reference/timerange/) in the upper right of the Dashboard. + +Dashboards can utilize [Templating](/reference/templating/) to make them more dynamic and interactive. + +Dashboards can utilize [Annotations](/reference/annotations/) to display event data across Panels. This can help correlate the time series data in the Panel with other events. + +Dashboards (or a specific Panel) can be [Shared](/reference/sharing/) easily in a variety of ways. You can send a link to someone who has a login to your Grafana. You can use the [Snapshot](/reference/sharing/#snapshots) feature to encode all the data currently being viewed into a static and interactive JSON document; it's so much better than emailing a screenshot! + +Dashboards can be tagged, and the Dashboard picker provides quick, searchable access to all Dashboards in a particular Organization. diff --git a/docs/sources/guides/gettingstarted.md b/docs/sources/guides/gettingstarted.md index 4bb250ef55c37..c92f30098ab8c 100644 --- a/docs/sources/guides/gettingstarted.md +++ b/docs/sources/guides/gettingstarted.md @@ -5,17 +5,21 @@ page_keywords: grafana, guide, documentation --- # Getting started -This guide will help you get started and acquainted with the Grafana user interface. It assumes you have a working -Grafana 2.0 instance, and have added at least one Grafana data source. +This guide will help you get started and acquainted with Grafana. It assumes you have a working Grafana 2.x instance, and have added at least one [Data Source](/datasources/overview). ## Beginner guides -Watch the 10min [beginners guide to building dashboards](https://www.youtube.com/watch?v=sKNZMtoSHN4&index=7&list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2) -to get a quick intro to the dashboard & panel editing UI. +Watch the 10min [beginners guide to building dashboards](https://www.youtube.com/watch?v=sKNZMtoSHN4&index=7&list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2) to get a quick intro to setting up Dashboards and Panels. + +## Basic Concepts +Read the [Basic Concepts](/guides/basic_concepts) document to get a crash course in key Grafana concepts. + +### Top header + +Let's start with creating a new Dashboard. You can find the new Dashboard link at the bottom of the Dashboard picker. You now have a blank Dashboard. -## Top header -The image above shows you the top header for a dashboard. +The image above shows you the top header for a Dashboard. 1. Side menubar toggle: This toggles the side menu, allowing you to focus on the data presented in the dashboard. The side menu provides access to features unrelated to a Dashboard such as Users, Organizations, and Data Sources. 2. Dashboard dropdown: This dropdown shows you which Dashboard you are currently viewing, and allows you to easily switch to a new Dashboard. From here you can also create a new Dashboard, Import existing Dashboards, and manage Dashboard playlists. @@ -24,21 +28,9 @@ The image above shows you the top header. 5. Save dashboard: The current Dashboard will be saved with the current Dashboard name. 6. Settings: Manage Dashboard settings and features such as Templating and Annotations. -## Dashboard -Dashboards are at the core of what Grafana is all about. Dashboards are composed of individual Panels arranged on a number of Rows.
-By adjusting the display properties of Panels and Rows, you can customize the perfect Dashboard for your exact needs. -Each panel can interact with data from any configured Grafana Data Source (currently InfluxDB, Graphite, OpenTSDB, and KairosDB). -This allows you to create a single dashboard that unifies the data across your organization. Panels use the time range specified -in the main Time Picker in the upper right, but they can also have relative time overrides. +## Dashboards, Panels, Rows, the building blocks of Grafana... +Dashboards are at the core of what Grafana is all about. Dashboards are composed of individual Panels arranged on a number of Rows. Grafana ships with a variety of Panels. Grafana makes it easy to construct the right queries, and customize the display properties so that you can create the perfect Dashboard for your need. Each Panel can interact with data from any configured Grafana Data Source (currently InfluxDB, Graphite, OpenTSDB, and KairosDB). The [Core Concepts](/guides/basic_concepts) guide explores these key ideas in detail. - - -1. Zoom out time range -2. Time picker dropdown. Here you can access relative time range options, auto refresh options and set custom absolute time ranges. -3. Manual refresh button. Will cause all panels to refresh (fetch new data). -4. Row controls menu. Via this menu you can add panels to the row, set row height and more. -5. Dashboard panel. You edit panels by clicking the panel title. -6. Graph legend. You can change series colors, y-axis and series visibility directly from the legend. ## Adding & Editing Graphs and Panels @@ -48,9 +40,22 @@ in the main Time Picker in the upper right, but they can also have relative time 2. To edit the graph you click on the graph title to open the panel menu, then `Edit`. 3. This should take you to the `Metrics` tab. In this tab you should see the editor for your default data source. +When you click the `Metrics` tab, you are presented with a Query Editor that is specific to the Panel Data Source. Use the Query Editor to build your queries and Grafana will visualize them in real time. + + + + + +1. Zoom out time range +2. Time picker dropdown. Here you can access relative time range options, auto refresh options and set custom absolute time ranges. +3. Manual refresh button. Will cause all panels to refresh (fetch new data). +4. Row controls menu. Via this menu you can add panels to the row, set row height and more. +5. Dashboard panel. You edit panels by clicking the panel title. +6. Graph legend. You can change series colors, y-axis and series visibility directly from the legend. + ## Drag-and-Drop panels -You can Drag-and-Drop Panels within and between Rows. Click and hold the Panel title, and drag it to its new location. +You can Drag-and-Drop Panels within and between Rows. Click and hold the Panel title, and drag it to its new location. You can also easily resize panels by clicking the (-) and (+) icons. ![](/img/animated_gifs/drag_drop.gif) diff --git a/docs/sources/guides/screencasts.md b/docs/sources/guides/screencasts.md index d8c605a424536..500ca9f9b6a70 100644 --- a/docs/sources/guides/screencasts.md +++ b/docs/sources/guides/screencasts.md @@ -7,53 +7,60 @@ no_toc: true
-

Episode 1 - Building Graphite Queries

-
+

Episode 1 - Building Graphite Queries

+ Learn how the Graphite Query Editor works, and how to use different graphing functions. There's also an introduction to graph display settings. +
-

Episode 2 - Templated Graphite Queries

-
+

Episode 2 - Templated Graphite Queries

+ The screencast focuses on Templating with the Graphite Data Source. Learn how to make dynamic and adaptable Dashboards for your Graphite metrics. +
- +
-

Episode 3 - Whats New In Grafana 2.0

-
+

Episode 3 - Whats New In Grafana 2.0

+ This screencast highlights many of the great new features that were included in the Grafana 2.0 release. +
-

Episode 4 - Installation & Configuration on Ubuntu / Debian

-
+

Episode 4 - Installation & Configuration on Ubuntu / Debian

+ Learn how to easily install the dependencies and packages to get Grafana 2.0 up and running on Ubuntu or Debian in just a few minutes. +
- +
-

Episode 5 - Installation & Configuration on Redhat / Centos

-
+

Episode 5 - Installation & Configuration on Red Hat / CentOS

+ This screencast shows how to get Grafana 2.0 installed and configured quickly on RPM-based Linux operating systems. +
-

Episode 6 - Adding data sources, users & organizations

-
+

Episode 6 - Adding data sources, users & organizations

+ Now that Grafana has been installed, learn about adding data sources and get a closer look at adding and managing Users and Organizations. +
- +
-

Episode 7 - Beginners guide to building dashboards

-
+

Episode 7 - Beginners guide to building dashboards

+ For newer users of Grafana, this screencast will familiarize you with the general UI and teach you how to build your first Dashboard. +
diff --git a/docs/sources/guides/whats-new-in-v2-1.md b/docs/sources/guides/whats-new-in-v2-1.md index 36289ac904194..10519cb042216 100644 --- a/docs/sources/guides/whats-new-in-v2-1.md +++ b/docs/sources/guides/whats-new-in-v2-1.md @@ -4,94 +4,127 @@ page_description: What's new in Grafana v2.1 page_keywords: grafana, new, changes, features, documentation --- -#What's new in Grafana v2.1 +# What's new in Grafana v2.1 +Grafana 2.1 brings improvements in three core areas: dashboarding, authentication, and data sources. +As with every Grafana release, there is a whole slew of new features, enhancements, and bug fixes. -##More Dynamic Dashboards -The Templating system is one of the most powerful and well-used features of Grafana. The 2.1 release brings numerous improvements that make Dashboards more dynamic than ever before. +## More Dynamic Dashboards +The Templating system is one of the most powerful and well-used features of Grafana. +The 2.1 release brings numerous improvements that make dashboards more dynamic than ever before. -### Multi-Value Template Select -Multi-Value Select allows for the selection of multiple template variables. -These variables can be used in any Panel to make them more dynamic, and automatically show only the desired data. -Multi-Value Select is also a basis for enabling Repeating Rows and Repeating Panels. +### Multi-Value Template Variables +A template variable with Multi-Value enabled allows for the selection of multiple values at the same time. +These variables can then be used in any Panel to make them more dynamic, and to give you the perfect view of your data. +Multi-Value variables is also enabling the new `row repeat` and `panel repeat` feature described below. ![Multi-Value Select](/img/v2/multi-select.gif "Multi-Value Select")

### Repeating Rows and Panels -It’s now possible to create a Dashboard that automatically adds (and removes) both Rows and Panels based on which Template variables you have selected. -Any Row or Any Panel can be configured to repeat (duplicate itself) based on the Multi-Value Template variables selected. +It’s now possible to create a dashboard that automatically adds (or removes) both rows and panels based +on selected variable values. Any row or any panel can be configured to repeat (duplicate itself) based +on a multi-value template variable.

![Repeating Rows and Panels](/img/v2/panel-row-repeat.gif "Repeating Rows and Panels")

-### Dashboard Links -To support better navigation between Dashboarads, it is possible to create custom and dynamic links from individual Panels to appropriate Dashboards (1888) +### Dashboard Links & Navigation +To support better navigation between dashboards, it's now possible to create custom and dynamic links from individual +panels to appropriate Dashboards. You also have the ability to create flexible top-level links on any +given dashboard thanks to the new dashboard navigation bar feature. -![Dashboard Links](/img/v2/panel-link.png "Dashboard Links") -

+![Dashboard Links](/img/v2/dash_links.png "Dashboard Links") + +Dashboard links can be added under dashboard settings. Either defined as static URLs with a custom icon or as dynamic +dashboard links or dropdowns based on custom dashboard search query. These links appear in the same +row under the top menu where template variables appear. + +- - - ### Better local Dashboard support -Grafana can now index Dashboards saved locally as JSON from a given directory. +Grafana can now index Dashboards saved locally as JSON from a given directory. These file based dashboards +will appear in the regular dashboard search along regular DB dashboards. > ***Note:*** Saving local dashboards back the folder is not supported; this feature is meant for statically generated JSON dashboards. - - - -## Improved authentication engine -New authentication methods add numerous options to manage users, roles and organizations. Grafana 2.1 also includes a "Read-only Editor" role which disables the query editor for that user. +## New Authentication Options +New authentication methods add numerous options to manage users, roles and organizations. ### LDAP support -This highly requested feature now allows your Grafana users to login with their LDAP credentials. You can also specify mappings between LDAP group memberships and Grafana Organization user roles. +This highly requested feature now allows your Grafana users to login with their LDAP credentials. +You can also specify mappings between LDAP group memberships and Grafana Organization user roles. -### Basic Auth support +### Basic Auth Support You can now authenticate against the Grafana API utilizing a simple username and password with basic HTTP authentication. -> ***Note:*** This can be useful for provisioning and config management systems that need to utilize the API without having to create an API key. +> ***Note:*** This can be useful for provisioning and config management systems that need +> to utilize the API without having to create an API key. -### User authentication utilizing headers +### Auth Proxy Support You can now authenticate utilizing a header (eg. X-Authenticated-User, or X-WEBAUTH-USER) > ***Note:*** this can be useful in situations with reverse proxies. ### New “Read-only Editor” User Role -There is a new User role available in this version of Grafana: “Read-only Editor”. This role behaves just like the Viewer role does in Grafana 2.0. -That is you can edit graphs and queries but no save dashboards. The Viewer role has been modified in Grafana 2.1 so that users assigned this role -can no longer edit panels. +There is a new User role available in this version of Grafana: “Read-only Editor”. This role behaves just +like the Viewer role does in Grafana 2.0. That is you can edit graphs and queries but not save dashboards. +The Viewer role has been modified in Grafana 2.1 so that users assigned this role can no longer edit panels. - - - -## Improved data source support +## Data source Improvements -### Improved Data Sources -We continue to make progress on fully supporting InfluxDB 0.9, but it has proven to be a bit of a moving target. This Grafana release brings a much improved query editor for InfluxDB 0.9 +### InfluxDB 0.9 Support +Grafana 2.1 now comes with full support for InfluxDB 0.9. There is a new query editor designed from scratch +for the new features InfluxDB 0.9 enables. -![InfluxDB Support](/img/v2/influx-query.gif "InfluxDB Support") -

+![InfluxDB Editor](/img/v2/influx_09_editor_anim.gif "InfluxDB Editor") +
-### OpenTSDB Data Source improvements -Grafana now supports template variable values lookup queries, as well as limiting tags by metric +### OpenTSDB Improvements +Grafana OpenTSDB data source now supports template variable values queries. This means you can create +template variables that fetches the values from OpenTSDB (for example metric names, tag names, or tag values). +The query editor is also enhanced to limiting tags by metric. > ***Note:*** OpenTSDB config option tsd.core.meta.enable_realtime_ts must enabled for OpenTSDB lookup api) - ### New Data Source: KairosDB -Experimental support for the KairosDB is now shipping in Grafana. Thank you to < > for their hard work in getting it to this point. +The Cassandra backed time series database KairosDB is now supported in Grafana out of the box. Thank you to +masaori335 for his hard work in getting it to this point. - - - ## Panel Improvements +Grafana 2.1 gives you even more flexibility customizing how individual panels render. +Overriding the colors of specific series using regular expressions, changing how series stack, +and allowing string values will help you better understand your data at a glance. + ### Graph Panel -Define series color using regex rule -![Define series color using regex rule ](/img/v2/regex_color.gif "Define series color using regex rule ") +Define series color using regex rule. This is useful when you have templated graphs with series names +that change depending selected template variables. Using a regex style override rule you could +for example make all series that contain the word **CPU** `red` and assigned to the second y axis. + +![Define series color using regex rule](/img/v2/regex_color_override.png "Define series color using regex rule") + +New series style override, negative-y transform and stack groups. Negative y transform is +very useful if you want to plot a series on the negative y scale without affecting the legend values like min or max or +the values shown in the hover tooltip. -New series style override, negative-y transform and stack groups ![Negative-y Transform](/img/v2/negative-y.png "Negative-y Transform") ![Negative-y Transform](/img/v2/negative-y-form.png "Negative-y Transform") ### Singlestat Panel -Now support string values - read more about [Singlestat Panels](../reference/singlestat.md) +Now support string values. Useful for time series database like InfluxDB that supports +string values. + +### Changelog +For a detailed list and link to github issues for everything included in the 2.1 release please +view the [CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md) file. + diff --git a/docs/sources/guides/whats-new-in-v2-5.md b/docs/sources/guides/whats-new-in-v2-5.md new file mode 100644 index 0000000000000..f02d0d43fd043 --- /dev/null +++ b/docs/sources/guides/whats-new-in-v2-5.md @@ -0,0 +1,105 @@ +--- +page_title: What's New in Grafana v2.5 +page_description: What's new in Grafana v2.5 +page_keywords: grafana, new, changes, features, documentation +--- + +# What's new in Grafana v2.5 + +## Release highlights +This is an exciting release, and we want to share some of the highlights. The release includes many +fixes and enhancements to all areas of Grafana, like new Data Sources, a new and improved timepicker, user invites, panel +resize handles and improved InfluxDB and OpenTSDB support. 
+ +### New time range controls +New Time picker + +A new timepicker with room for more quick ranges as well as new types of relative ranges, like `Today`, +`The day so far` and `This day last week`. Also an improved time & calendar picker that now works +correctly in UTC mode. + +### Elasticsearch + +Elasticsearch example +
+ +This release brings a fully featured query editor for Elasticsearch. You will now be able to visualize +logs or any kind of data stored in Elasticsearch. The query editor allows you to build both simple +and complex queries for logs or metrics. + +- Compute metrics from your documents, supported Elasticsearch aggregations: + - Count, Avg, Min, Max, Sum + - Percentiles, Std Dev, etc. +- Group by multiple terms or filters + - Specify group by options like Top 5 based on Avg @value +- Auto completion for field names +- Query only relevant indices based on time pattern +- Alias patterns for short readable series names + +Try the new Elasticsearch query editor on the [play.grafana.org](http://play.grafana.org/dashboard/db/elasticsearch-metrics) site. + +### CloudWatch + +Cloudwatch editor + +Grafana 2.5 ships with a new CloudWatch datasource that will allow you to query and visualize CloudWatch +metrics directly from Grafana. + +- Rich editor with auto completion for metric names, namespaces and dimensions +- Templating queries for generic dashboards +- Alias patterns for short readable series names + +### Prometheus + +Prometheus editor + +Grafana 2.5 ships with a new Prometheus datasource that will allow you to query and visualize data +stored in Prometheus. + + +### Mix different data sources +Mix data sources in the same dashboard or in the same graph! + +In previous releases you have been able to mix different data sources on the same dashboard. In v2.5 you +will be able to mix them on the same graph! You can enable this by selecting the built in `-- Mixed --` data source. +When selected this will allow you to specify data source on a per query basis. This will, for example, allow you +to plot metrics from different Graphite servers on the same Graph or plot data from Elasticsearch alongside +data from Prometheus. Mixing different data sources on the same graph works for any data source, even custom ones. + +### Panel Resize handles + + +This release adds resize handles to the bottom right corners of panels, making it easy to resize both width and height. + +### User invites + + +This version also brings some new features for user management. + +- Organization admins can now invite new users (via email or manually via invite link) +- Users can sign up using an invite link and get automatically added to the invited organization +- User signup workflow can (if enabled) contain an email verification step. +- Check out [#2353](https://github.com/grafana/grafana/issues/2353) for more info. + +### Miscellaneous improvements + +- InfluxDB query editor now supports math and AS expressions +- InfluxDB query editor now supports custom group by interval +- Panel drilldown link is easier to reach +- LDAP improvements (can now search for group membership if your LDAP server does not support memberOf attribute) +- More units for graph and singlestat panel (Length, Volume, Temperature, Pressure, Currency) +- Admin page for all organizations (remove / edit) + +### Breaking changes +There have been some changes to the data source plugin API. If you are using a custom plugin, check that there is an update for it before you upgrade. Also +the new time picker does not currently support custom quick ranges like the last one did. This will likely be added in a +future release. + +### Changelog +For a detailed list and link to github issues for everything included in the 2.5 release please +view the [CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md) file.
+ +- - - + +### Download Grafana 2.5 now + diff --git a/docs/sources/guides/whats-new-in-v2.md b/docs/sources/guides/whats-new-in-v2.md index a388aef61600c..9e87ab55e6e11 100644 --- a/docs/sources/guides/whats-new-in-v2.md +++ b/docs/sources/guides/whats-new-in-v2.md @@ -72,17 +72,17 @@ When you zoom or change the Dashboard time to a custom absolute time range, all The `Hide time override info` option allows you to hide the the override info text that is by default shown in the upper right of a panel when overridden time range options. -Currently you can only override the dashboard time with relative time ranges, not absolute time ranges. +Currently you can only override the dashboard time with relative time ranges, not absolute time ranges. ## Panel iframe embedding -You can embed a single panel on another web page or your own application using the panel share dialog. +You can embed a single panel on another web page or your own application using the panel share dialog. -Below you should see an iframe with a graph panel (taken from a Dashboard snapshot at [snapshot.raintank.io](http://snapshot.raintank.io). +Below you should see an iframe with a graph panel (taken from a Dashboard snapshot at [snapshot.raintank.io](http://snapshot.raintank.io). Try hovering or zooming on the panel below! - + This feature makes it easy to include interactive visualizations from your Grafana instance anywhere you want. @@ -130,7 +130,7 @@ latency, network traffic, and storage) ![](/img/v2/dashlist_starred.png) -The dashlist is a new panel in Grafana v2.0. It allows you to show your personal starred dashboards, as well as do custom searches based on search strings or tags. +The dashlist is a new panel in Grafana v2.0. It allows you to show your personal starred dashboards, as well as do custom searches based on search strings or tags. dashlist is used on the new Grafana Home screen. It is included as a reference Panel and is useful to provide basic linking between Dashboards. @@ -138,7 +138,7 @@ dashlist is used on the new Grafana Home screen. It is included as a reference P Data sources in Grafana v2.0 are no longer defined in a config file. Instead, they are added through the UI or the HTTP API. -The backend can now proxy data from Data Sources, which means that it is a lot easier to get started using Grafana with Graphite or OpenTSDB without having to spend time with CORS (Cross origin resource sharing) work-arounds. +The backend can now proxy data from Data Sources, which means that it is a lot easier to get started using Grafana with Graphite or OpenTSDB without having to spend time with CORS (Cross origin resource sharing) work-arounds. In addition, connections to Data Sources can be better controlled and secured, and authentication information no longer needs to be exposed to the browser. @@ -146,14 +146,14 @@ In addition, connections to Data Sources can be better controlled and secured, a A commonly reported problem has been graphs dipping to zero at the the end, because metric data for the last interval has yet to be written to the Data Source. These graphs then "self correct" once the data comes in, but can look deceiving or alarming at times. -You can avoid this problem by adding a `now delay` in `Dashboard Settings` > `Time Picker` tab. This new feature will cause Grafana to ignore the most recent data up to the set delay. +You can avoid this problem by adding a `now delay` in `Dashboard Settings` > `Time Picker` tab. 
This new feature will cause Grafana to ignore the most recent data up to the set delay. ![](/img/v2/timepicker_now_delay.jpg) The delay that may be necessary depends on how much latency you have in your collection pipeline. ## Dashboard overwrite protection -Grafana v2.0 protects Users from accidentally overwriting each others Dashboard changes. Similar protections are in place if you try to create a new Dashboard with the same name as an existing one. +Grafana v2.0 protects Users from accidentally overwriting each others Dashboard changes. Similar protections are in place if you try to create a new Dashboard with the same name as an existing one. ![](/img/v2/overwrite_protection.jpg) diff --git a/docs/sources/index.md b/docs/sources/index.md index 491ff31377d66..f595c81ef284d 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -10,9 +10,10 @@ It provides a powerful and elegant way to create, share, and explore data and da Grafana is most commonly used for Internet infrastructure and application analytics, but many use it in other domains including industrial sensors, home automation, weather, and process control. -Grafana features pluggable panels and data sources allowing easy extensibility. There is currently rich support for [Graphite](http://graphite.readthedocs.org/en/latest/), [InfluxDB](http://influxdb.org) and [OpenTSDB](http://opentsdb.net). There is also experimental support for [KairosDB](https://github.com/kairosdb/kairosdb), and SQL is on the roadmap. Grafana has a variety of panels, including a fully featured graph panel with rich visualization options. +Grafana features pluggable panels and data sources allowing easy extensibility. There is currently rich support for [Graphite](http://graphite.readthedocs.org/en/latest/), [InfluxDB](http://influxdb.org) and [OpenTSDB](http://opentsdb.net). There is also experimental support for [KairosDB](https://github.com/kairosdb/kairosdb), [Prometheus](http://prometheus.io/), and SQL is on the roadmap. Grafana has a variety of panels, including a fully featured graph panel with rich visualization options. -Version 2.0 was released in April 2015: Grafana now ships with its own backend server that brings [many changes and features](../guides/whats-new-in-v2/). +Version 2.0 was released in April 2015: Grafana now ships with its own backend server that brings [many changes and features](../guides/whats-new-in-v2/). +Version 2.1 was released in July 2015 and added [even more features and enhancements](../guides/whats-new-in-v2-1/). 
## Community Resources, Feedback, and Support @@ -24,7 +25,7 @@ Most of the new features and improvements that go into Grafana come from our use If you have any trouble with Grafana, whether you can't get it set up or you just want clarification on a feature, there are a number of ways to get help: -- [Troubleshooting guide](../troubleshooting) +- [Troubleshooting guide](/installation/troubleshooting/) - \#grafana IRC channel on the freenode network (chat.freenode.net) - Search closed and open [issues on GitHub](https://github.com/grafana/grafana/issues) - [Mailing list](https://groups.io/org/groupsio/grafana) diff --git a/docs/sources/installation/configuration.md b/docs/sources/installation/configuration.md index e35bd164ba45b..e68edcbca0cd2 100644 --- a/docs/sources/installation/configuration.md +++ b/docs/sources/installation/configuration.md @@ -18,7 +18,7 @@ specified in a `.ini` configuration file or specified using environment variable > **Note.** If you have installed Grafana using the `deb` or `rpm` > packages, then your configuration file is located at > `/etc/grafana/grafana.ini`. This path is specified in the Grafana -> init.d script using `-config` file parameter. +> init.d script using `--config` file parameter. ## Using environment variables @@ -37,12 +37,12 @@ should be upper case, `.` should be replaced by `_`. For example, given these co client_secret = 0ldS3cretKey -Then you can override that using: +Then you can override them using: export GF_SECURITY_ADMIN_USER=true export GF_AUTH_GOOGLE_CLIENT_SECRET=newS3cretKey -
+
## [paths] @@ -63,7 +63,7 @@ file. ### http_addr -The IP address to bind to, if empty will bind to all interfaces +The IP address to bind to. If empty will bind to all interfaces ### http_port @@ -116,9 +116,9 @@ Path to the certificate file (if `protocol` is set to `https`). Path to the certificate key file (if `protocol` is set to `https`). -
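For illustration, the HTTPS related options above combine into a `[server]` section along the lines of the sketch below (the certificate paths and port are placeholders; adjust them for your environment):

    [server]
    protocol = https
    http_port = 3000
    cert_file = /etc/grafana/grafana.crt
    cert_key = /etc/grafana/grafana.key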
+
-
+
## [database] @@ -158,7 +158,7 @@ The database user's password (not applicable for `sqlite3`). For `postgres` only, either `disable`, `require` or `verify-full`. -
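As a rough sketch only (the host, database name, and credentials below are placeholders, and `sqlite3` remains the default if you change nothing), pointing Grafana at a MySQL database combines the `[database]` options like this:

    [database]
    type = mysql
    host = 127.0.0.1:3306
    name = grafana
    user = grafana
    password = mypassword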
+
## [security] @@ -184,7 +184,7 @@ Used for signing keep me logged in / remember me cookies. Set to `true` to disable the use of Gravatar for user profile images. Default is `false`. -
+
## [users] @@ -266,7 +266,7 @@ automatically signed up. ### team_ids Require an active team membership for at least one of the given teams on -GitHub. If the authenticated user isn't a member of at least one the +GitHub. If the authenticated user isn't a member of at least one of the teams they will not be able to register or authenticate with your Grafana instance. For example: @@ -274,7 +274,7 @@ Grafana instance. For example: enabled = true client_id = YOUR_GITHUB_APP_CLIENT_ID client_secret = YOUR_GITHUB_APP_CLIENT_SECRET - scopes = user:email + scopes = user:email,read:org team_ids = 150,300 auth_url = https://github.com/login/oauth/authorize token_url = https://github.com/login/oauth/access_token @@ -318,19 +318,36 @@ automatically signed up.
## [auth.basic] -### enable -When enable is `true` (default) the http api will accept basic authentication. +### enabled +When enabled is `true` (default) the http api will accept basic authentication.
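For example, you can exercise the HTTP API with plain basic credentials; the endpoint, user, and password below are placeholders for illustration, so substitute your own:

    curl http://admin:admin@localhost:3000/api/org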
## [auth.ldap] -### enable -Set to `true` to enable ldap integration (default: `false`) +### enabled +Set to `true` to enable LDAP integration (default: `false`) ### config_file -Path to the ldap specific configuration file (default: `/etc/grafana/ldap.toml`) +Path to the LDAP specific configuration file (default: `/etc/grafana/ldap.toml`) + +> For details on LDAP Configuration, go to the [LDAP Integration](ldap.md) page. -> For detail on LDAP Configuration, go to the [Ldap Integration](ldap.md) page. +
+ +## [auth.proxy] +This feature allows you to handle authentication in an HTTP reverse proxy. + +### enabled +Defaults to `false` + +### header_name +Defaults to X-WEBAUTH-USER + +### header_property +Defaults to username but can also be set to email + +### auto_sign_up +Set to `true` to enable auto sign up of users who do not exist in Grafana DB. Defaults to `true`.
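Putting the options above together, a minimal `[auth.proxy]` block could look like the following sketch (the header name must match whatever your reverse proxy actually sends; `X-WEBAUTH-USER` is only the default used here for illustration):

    [auth.proxy]
    enabled = true
    header_name = X-WEBAUTH-USER
    header_property = username
    auto_sign_up = true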
@@ -367,29 +384,30 @@ The name of the Grafana session cookie. ### cookie_secure -Set to true if you host Grafana behind HTTPs only. Defaults to `false`. +Set to true if you host Grafana behind HTTPS only. Defaults to `false`. ### session_life_time How long sessions lasts in seconds. Defaults to `86400` (24 hours). -
+
## [analytics] ### reporting_enabled -When enabled Grafana will send anonymous usage statistics to `stats.grafana.org`. -No IP addresses are being tracked, only simple counters to track running instances, -versions, dashboard & error counts. It is very helpful to us, please leave this -enabled. Counters are sent every 24 hours. Default value is `true`. +When enabled Grafana will send anonymous usage statistics to +`stats.grafana.org`. No IP addresses are being tracked, only simple counters to +track running instances, versions, dashboard & error counts. It is very helpful +to us, so please leave this enabled. Counters are sent every 24 hours. Default +value is `true`. ### google_analytics_ua_id -If you want to track Grafana usage via Google analytics specify *your* Universal Analytics ID -here. By default this feature is disabled. +If you want to track Grafana usage via Google analytics specify *your* Universal +Analytics ID here. By default this feature is disabled. -
+
## [dashboards.json] diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md index 07a7d451626c1..4a734ac4400d7 100644 --- a/docs/sources/installation/debian.md +++ b/docs/sources/installation/debian.md @@ -10,15 +10,16 @@ page_keywords: grafana, installation, debian, ubuntu, guide Description | Download ------------ | ------------- -.deb for Debian-based Linux | [grafana_2.0.2_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.0.2_amd64.deb) +.deb for Debian-based Linux | [grafana_2.5.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.5.0_amd64.deb) ## Install - $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.0.2_amd64.deb + $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.5.0_amd64.deb $ sudo apt-get install -y adduser libfontconfig - $ sudo dpkg -i grafana_2.0.2_amd64.deb + $ sudo dpkg -i grafana_2.5.0_amd64.deb ## APT Repository + Add the following line to your `/etc/apt/sources.list` file. deb https://packagecloud.io/grafana/stable/debian/ wheezy main @@ -57,7 +58,7 @@ HTTPS. ## Start the server (init.d service) -You can start Grafana by running: +Start Grafana by running: $ sudo service grafana-server start @@ -71,7 +72,7 @@ To configure the Grafana server to start at boot time: ## Start the server (via systemd) -To start the service using systemd. +To start the service using systemd: $ systemctl daemon-reload $ systemctl start grafana-server @@ -96,7 +97,7 @@ By default Grafana will log to `/var/log/grafana` The default configuration specifies a sqlite3 database located at `/var/lib/grafana/grafana.db`. Please backup this database before -upgrades. You can also use MySQL or Postgres as the Grafana database. +upgrades. You can also use MySQL or Postgres as the Grafana database, as detailed on [the configuration page](configuration.md#database). ## Configuration @@ -112,17 +113,16 @@ those options. ## Installing from binary tar file -Start by [downloading](http://grafana.org/download/builds) the latest -`.tar.gz` file and extract it. This will extract into a folder named -after the version you downloaded. This folder contains all files -required to run Grafana. There are no init scripts or install scripts -in this package. +Download [the latest `.tar.gz` file](http://grafana.org/download/builds) and +extract it. This will extract into a folder named after the version you +downloaded. This folder contains all files required to run Grafana. There are +no init scripts or install scripts in this package. To configure Grafana add a configuration file named `custom.ini` to the `conf` folder and override any of the settings defined in `conf/defaults.ini`. -Start Grafana by executing `./grafana web`. The `grafana` binary needs +Start Grafana by executing `./grafana-server web`. The `grafana-server` binary needs the working directory to be the root install directory (where the binary and the `public` folder is located). diff --git a/docs/sources/installation/docker.md b/docs/sources/installation/docker.md index c11de0faa7bad..18dcf96445072 100644 --- a/docs/sources/installation/docker.md +++ b/docs/sources/installation/docker.md @@ -6,7 +6,10 @@ page_keywords: grafana, installation, docker, container, guide # Installing using Docker -## Install from offical docker image +> **2.0.2 -> 2.1.0 Upgrade NOTICE!** +> The data and log paths were not correct in the previous image. The grafana database was placed by default in /usr/share/grafana/data instead of the correct path /var/lib/grafana. 
This means it was not in a dir that was marked as a volume. So if you remove the container it will remove the grafana database. So before updating make sure you copy the /usr/share/grafana/data path from inside the container to the host. + +## Install from official docker image Grafana has an official Docker container. diff --git a/docs/sources/installation/index.md b/docs/sources/installation/index.md index 8116a44d2bd65..21c8c127b469e 100644 --- a/docs/sources/installation/index.md +++ b/docs/sources/installation/index.md @@ -9,7 +9,7 @@ page_keywords: grafana, installation, documentation Grafana is easily installed via a Debian/Ubuntu package (.deb), via Redhat/Centos package (.rpm) or manually via a tarball that contains all required files and binaries. If you can't find a package or binary for -your platform you might be able to build one your self, read the [build +your platform, you might be able to build one yourself. Read the [build from source](../project/building_from_source) instructions for more information. diff --git a/docs/sources/installation/ldap.md b/docs/sources/installation/ldap.md index 4cba472d053c5..82309ec0f19aa 100644 --- a/docs/sources/installation/ldap.md +++ b/docs/sources/installation/ldap.md @@ -1,17 +1,17 @@ --- page_title: LDAP Integration -page_description: LDAP Integrtaion guide for Grafana. +page_description: LDAP Integration guide for Grafana. page_keywords: grafana, ldap, configuration, documentation, integration --- # LDAP Integration -Grafana 2.1 ships with strong LDAP integration feature. The LDAP integration in Grafan allows your -Grafan users to login with their LDAP credentials. You can also specify mappings between LDAP +Grafana 2.1 ships with a strong LDAP integration feature. The LDAP integration in Grafana allows your +Grafana users to login with their LDAP credentials. You can also specify mappings between LDAP group memberships and Grafana Organization user roles. ## Configuration -You turn on ldap in the [main config file](configuration/#authldap) as well as specify the path to the ldap +You turn on LDAP in the [main config file](../configuration/#authldap) as well as specify the path to the LDAP specific configuration file (default: `/etc/grafana/ldap.toml`). ### Example config @@ -21,7 +21,7 @@ specific configuration file (default: `/etc/grafana/ldap.toml`). verbose_logging = false [[servers]] -# Ldap server host +# Ldap server host (specify multiple hosts space separated) host = "127.0.0.1" # Default port is 389 or 636 if use_ssl = true port = 389 @@ -29,17 +29,29 @@ port = 389 use_ssl = false # set to true if you want to skip ssl cert validation ssl_skip_verify = false +# set to the path to your root CA certificate or leave unset to use system defaults +# root_ca_cert = /path/to/certificate.crt # Search user bind dn bind_dn = "cn=admin,dc=grafana,dc=org" # Search user bind password -bind_password = "grafana" +bind_password = 'grafana' -# Search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" +# User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)" search_filter = "(cn=%s)" + # An array of base dns to search through search_base_dns = ["dc=grafana,dc=org"] +# In POSIX LDAP schemas, without memberOf attribute a secondary query must be made for groups. +# This is done by enabling group_search_filter below. You must also set member_of= "cn" +# in [servers.attributes] below. 
+ +## Group search filter, to retrieve the groups of which the user is a member (only set if memberOf attribute is not available) +# group_search_filter = "(&(objectClass=posixGroup)(memberUid=%s))" +## An array of the base DNs to search through for groups. Typically uses ou=groups +# group_search_base_dns = ["ou=groups,dc=grafana,dc=org"] + # Specify names of the ldap attributes your ldap uses [servers.attributes] name = "givenName" @@ -55,7 +67,7 @@ org_role = "Admin" # The Grafana organization database id, optional, if left out the default org (id 1) will be used # org_id = 1 -[[server.ldap_group_to_org_role_mappings]] +[[servers.group_mappings]] group_dn = "cn=users,dc=grafana,dc=org" org_role = "Editor" @@ -63,11 +75,12 @@ org_role = "Editor" # If you want to match all (or no ldap groups) then you can use wildcard group_dn = "*" org_role = "Viewer" + ``` ## Bind & Bind Password -By default the configuration expects you to specify a bind DN and bind password. This should be a read only user that can perform ldap searches. +By default the configuration expects you to specify a bind DN and bind password. This should be a read only user that can perform LDAP searches. When the user DN is found a second bind is performed with the user provided username & password (in the normal Grafana login form). ``` @@ -75,21 +88,43 @@ bind_dn = "cn=admin,dc=grafana,dc=org" bind_password = "grafana" ``` -### Single bind Example +### Single Bind Example -If you can provide a single bind expression that matches all possible users you can skip the second bind and bind against the user DN directly. +If you can provide a single bind expression that matches all possible users, you can skip the second bind and bind against the user DN directly. This allows you to not specify a bind_password in the configuration file. ``` bind_dn = "cn=%s,o=users,dc=grafana,dc=org" ``` -In this case you skip providing a `bind_password` and instead provide a `bind_dn` value with a `%s` somewhere. This will be replaced with the username -entered in on the Grafana login page. The search filter and search bases settings are still needed to perform the ldap search to retreive the other ldap -information (like ldap groups and email). +In this case you skip providing a `bind_password` and instead provide a `bind_dn` value with a `%s` somewhere. This will be replaced with the username entered in on the Grafana login page. +The search filter and search bases settings are still needed to perform the LDAP search to retrieve the other LDAP information (like LDAP groups and email). + +## POSIX schema (no memberOf attribute) +If your ldap server does not support the memberOf attribute add these options: + +```toml +## Group search filter, to retrieve the groups of which the user is a member (only set if memberOf attribute is not available) +group_search_filter = "(&(objectClass=posixGroup)(memberUid=%s))" +## An array of the base DNs to search through for groups. Typically uses ou=groups +group_search_base_dns = ["ou=groups,dc=grafana,dc=org"] +``` + +Also change set `member_of = "cn"` in the `[servers.attributes]` section. + + +## LDAP to Grafana Org Role Sync + +### Mappings +In `[[servers.group_mappings]]` you can map an LDAP group to a Grafana organization +and role. These will be synced every time the user logs in, with LDAP being +the authoritative source. So, if you change a user's role in the Grafana Org. +Users page, this change will be reset the next time the user logs in. 
If you +change the LDAP groups of a user, the change will take effect the next +time the user logs in. + +### Priority +The first group mapping that an LDAP user is matched to will be used for the sync. If you have LDAP users that fit multiple mappings, the topmost mapping in the TOML config will be used. + -## Ldap to Grafana Org Role Sync -In the `[[servers.group_mappings]]` you can map a LDAP group to a grafana organization and role. These will be synced every time the user logs in. So -if you change a users role in the Grafana Org. Users page, this change will be reset the next time the user logs in. Similarly if you -can LDAP groups for a user in LDAP the change will take effect the next time the user logs in to Grafana. diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 48f8ca075a37b..7246033b64068 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -6,7 +6,7 @@ page_keywords: grafana, installation, mac, osx, guide # Installing on Mac -There is currently no binary build for Mac. But read the [build from +There is currently no binary build for Mac, but Grafana will happily build on Mac. Read the [build from source](/project/building_from_source) page for instructions on how to build it yourself. diff --git a/docs/sources/installation/performance.md b/docs/sources/installation/performance.md index 6dcc38b5f7882..f09686687ca49 100644 --- a/docs/sources/installation/performance.md +++ b/docs/sources/installation/performance.md @@ -8,15 +8,15 @@ page_keywords: grafana, performance, documentation ## Graphite -Graphite 0.9.13 adds a much needed feature to the JSON rendering API +Graphite 0.9.14 adds a much needed feature to the JSON rendering API that is very important for Grafana. If you are experiencing slow load & rendering times for large time ranges then it is most likely caused by running Graphite 0.9.12 or lower. The latest version of Graphite adds a `maxDataPoints` parameter to the -JSON render API, without this feature Graphite can return hundreds of +JSON render API. Without this feature Graphite can return hundreds of thousands of data points per graph, which can hang your browser. Be sure to upgrade to -[0.9.13](http://graphite.readthedocs.org/en/latest/releases/0_9_13.html). +[0.9.14](http://graphite.readthedocs.org/en/latest/releases/0_9_14.html). diff --git a/docs/sources/installation/provisioning.md b/docs/sources/installation/provisioning.md index ea57b0d00ddf6..2ec79d5474109 100644 --- a/docs/sources/installation/provisioning.md +++ b/docs/sources/installation/provisioning.md @@ -10,25 +10,21 @@ Here are links for how to install Grafana (and some include Graphite or InfluxDB as well) via a provisioning system. These are not maintained by any core Grafana team member and might be out of date. -## Puppet +Some of the linked cookbooks/manifests/etc. will install and configure Grafana 2.x, while some will only install the older Grafana 1.x versions. They've been broken apart below for your convenience. -* [forge.puppetlabs.com/bfraser/grafana](https://forge.puppetlabs.com/bfraser/grafana) +### Puppet -## Ansible +* [forge.puppetlabs.com/bfraser/grafana](https://forge.puppetlabs.com/bfraser/grafana) **Note:** The current version works with Grafana 2.x. To install older versions of Grafana use the 1.x series of releases. 
-* [github.com/bobrik/ansible-grafana](https://github.com/bobrik/ansible-grafana) -* [github.com/bitmazk/ansible-digitalocean-influxdb-grafana](https://github.com/bitmazk/ansible-digitalocean-influxdb-grafana) -* [github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana) +### Ansible -## Docker +* [github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana) -* [github.com/kamon-io/docker-grafana-graphite](https://github.com/kamon-io/docker-grafana-graphite) -* [github.com/kamon-io/docker-grafana-influxdb](https://github.com/kamon-io/docker-grafana-influxdb) -* [github.com/tutumcloud/tutum-docker-grafana](https://github.com/tutumcloud/tutum-docker-grafana) -* [github.com/mingfang/docker-grafana](https://github.com/mingfang/docker-grafana) +### Docker +* [github.com/grafana/grafana-docker](https://github.com/grafana/grafana-docker) -## Chef +### Chef -* [github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana) -* [github.com/dzautner/grafana-cookbook](https://github.com/dzautner/grafana-cookbook) +* [github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana) **Note:** The current version works with Grafana 2.x. To install older versions of Grafana use the 1.x series of releases. +* [github.com/Nordstrom/grafana2-cookbook](https://github.com/Nordstrom/grafana2-cookbook) diff --git a/docs/sources/installation/rpm.md b/docs/sources/installation/rpm.md index ab0f5e3be248a..464faa09b9deb 100644 --- a/docs/sources/installation/rpm.md +++ b/docs/sources/installation/rpm.md @@ -1,6 +1,6 @@ --- page_title: Installing on RPM-based Linux -page_description: Grafana Installation guide for Centos, Fedora, Redhat. +page_description: Grafana Installation guide for Centos, Fedora, OpenSuse, Redhat. page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide --- @@ -10,18 +10,24 @@ page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide Description | Download ------------ | ------------- -.RPM for Fedora / RHEL / CentOS Linux | [grafana-2.0.2-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.0.2-1.x86_64.rpm) +.RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.5.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.5.0-1.x86_64.rpm) ## Install from package file You can install Grafana using Yum directly. - $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.0.2-1.x86_64.rpm + $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.5.0-1.x86_64.rpm Or install manually using `rpm`. +#### On CentOS / Fedora / Redhat: + $ sudo yum install initscripts fontconfig - $ sudo rpm -Uvh grafana-2.0.1-1.x86_64.rpm + $ sudo rpm -Uvh grafana-2.5.0-1.x86_64.rpm + +#### On OpenSuse: + + $ sudo rpm -i --nodeps grafana-2.5.0-1.x86_64.rpm ## Install via YUM Repository @@ -100,7 +106,7 @@ By default Grafana will log to `/var/log/grafana` The default configuration specifies a sqlite3 database located at `/var/lib/grafana/grafana.db`. Please backup this database before -upgrades. You can also use MySQL or Postgres as the Grafana database. +upgrades. You can also use MySQL or Postgres as the Grafana database, as detailed on [the configuration page](configuration.md#database). 
## Configuration diff --git a/docs/sources/installation/troubleshooting.md b/docs/sources/installation/troubleshooting.md index 7a6f8a9d8ee3e..7a6865b9da153 100644 --- a/docs/sources/installation/troubleshooting.md +++ b/docs/sources/installation/troubleshooting.md @@ -18,8 +18,8 @@ an error like this: ![](/img/v1/graph_timestore_error.png) -For some type of errors the `View details` link will show you error -details. But for many types of HTTP connection errors there is very +For some types of errors, the `View details` link will show you error +details. For many types of HTTP connection errors, however, there is very little information. The best way to troubleshoot these issues is use the [Chrome developer tools](https://developer.chrome.com/devtools/index). By pressing `F12` you can bring up the chrome dev tools. diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index 97d128f61a0fa..1bc2b5a2b92de 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -10,7 +10,7 @@ page_keywords: grafana, installation, windows guide Description | Download ------------ | ------------- -Zip package for Windows | [grafana.2.0.2.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-2.0.2.windows-x64.zip) +Zip package for Windows | [grafana.2.5.0.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-2.5.0.windows-x64.zip) ## Configure diff --git a/docs/sources/project/building_from_source.md b/docs/sources/project/building_from_source.md index ee8d6e66c4535..71e94b088fbae 100644 --- a/docs/sources/project/building_from_source.md +++ b/docs/sources/project/building_from_source.md @@ -6,16 +6,16 @@ page_keywords: grafana, build, contribute, documentation # Building Grafana from source -Guide for creating packages from source, and for getting grafana up and running in -dev environment. +This guide will help you create packages from source and get grafana up and running in +dev environment. Grafana ships with its own required backend server; also completely open-source. It's written in [Go](http://golang.org) and has a full [HTTP API](/v2.1/reference/http_api/). ## Dependencies -- Go 1.4 -- NodeJS +- [Go 1.4](https://golang.org/dl/) +- [NodeJS](https://nodejs.org/download/) ## Get Code - +Create a directory for the project and set your path accordingly. Then download and install Grafana into your $GOPATH directory ``` export GOPATH=`pwd` go get github.com/grafana/grafana @@ -24,16 +24,16 @@ go get github.com/grafana/grafana ## Building the backend ``` cd $GOPATH/src/github.com/grafana/grafana -go run build.go setup (only needed once to install godep) -$GOPATH/bin/godep restore (will pull down all golang lib dependecies in your current GOPATH) -go build . +go run build.go setup # (only needed once to install godep) +$GOPATH/bin/godep restore # (will pull down all golang lib dependencies in your current GOPATH) +go run build.go build # (or 'go build .') ``` -## Building on Windows +#### Building on Windows The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on windows you need to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download). -## Building frontend assets +## Build the Front-end Assets To build less to css for the frontend you will need a recent version of of node (v0.12.0), npm (v2.5.0) and grunt (v0.4.5). 
Run the following: @@ -51,12 +51,24 @@ go get github.com/Unknwon/bra bra run ``` -## Running +## Running Grafana Locally +You can run a local instance of Grafana by running: ``` -./grafana-server +./bin/grafana-server ``` +If you built the binary with `go run build.go build`, run `./bin/grafana-server` + +If you built it with `go build .`, run `./grafana` -Open grafana in your browser (default http://localhost:3000) and login with admin user (default user/pass = admin/admin). +Open grafana in your browser (default [http://localhost:3000](http://localhost:3000)) and login with admin user (default user/pass = admin/admin). + +## Developing for Grafana +To add features, customize your config, etc, you'll need to rebuild on source change (requires that you executed [godep restore](#build-the-backend), as outlined above). +``` +go get github.com/Unknwon/bra +bra run +``` +You'll also need to run `grunt watch` to watch for changes to the front-end. ## Creating optimized release packages This step builds linux packages and requires that fpm is installed. Install fpm via `gem install fpm`. @@ -73,6 +85,10 @@ You only need to add the options you want to override. Config files are applied 1. grafana.ini 2. custom.ini +Learn more about Grafana config options in the [Configuration section](/installation/configuration/) + ## Create a pull requests +Please contribute to the Grafana project and submit a pull request! Build new features, write or update documentation, fix bugs and generally make Grafana even more awesome. -Before or after your create a pull requests, sign the [contributor license agreement](/project/cla.html). +Before or after you create a pull request, sign the [contributor license agreement](/project/cla.html). +Together we can build amazing software faster. \ No newline at end of file diff --git a/docs/sources/project/cla.md b/docs/sources/project/cla.md index c1450bb6fa92c..4554300249dbe 100644 --- a/docs/sources/project/cla.md +++ b/docs/sources/project/cla.md @@ -28,7 +28,7 @@ but protects both the contributor and the company / foundation behind the projec It also gives us the option to relicense the code with a more permissive license in the future. -If you have more questions, shoot us an email or drop by #grafana on IRC (freenode). +If you have more questions, shoot us an [email](mailto:torkel@grafana.org) or drop by #grafana on IRC (freenode). Many thanks to [RethinkDB](http://rethinkdb.com) for permission to re-use their CLA! @@ -82,4 +82,4 @@ You are not expected to provide support for your contributions, except to the ex The failure of either party to enforce its rights under this agreement for any period shall not be construed as a waiver of such rights. No changes or modifications or waivers to this Agreement will be effective unless in writing and signed by both parties. In the event that any provision of this agreement shall be determined to be illegal or unenforceable, that provision will be limited or eliminated to the minimum extent necessary so that this agreement shall otherwise remain in full force and effect and enforceable. This agreement shall be governed by and construed in accordance with the laws of the State of California in the United States without regard to the conflicts of laws provisions thereof. In any action or proceeding to enforce rights under this agreement, the prevailing party will be entitled to recover costs and attorneys’ fees. 
- + \ No newline at end of file diff --git a/docs/sources/reference/admin.md b/docs/sources/reference/admin.md index 854147b4240ac..c5d5073020da6 100644 --- a/docs/sources/reference/admin.md +++ b/docs/sources/reference/admin.md @@ -24,7 +24,7 @@ modify Organization details and options. As a Grafana Administrator, you have complete access to any Organization or User in that instance of Grafana. -When performing actions as a Grafana admin, the sidebar will change it's apperance as below to indicate you are performing global server administration. +When performing actions as a Grafana admin, the sidebar will change it's appearance as below to indicate you are performing global server administration. From the Grafana Server Admin page, you can access the System Info page which summarizes all of the backend configuration settings of the Grafana server. diff --git a/docs/sources/reference/annotations.md b/docs/sources/reference/annotations.md index b0e84ef762b04..51852abcdf2ad 100644 --- a/docs/sources/reference/annotations.md +++ b/docs/sources/reference/annotations.md @@ -5,14 +5,17 @@ page_keywords: grafana, annotations, guide, documentation --- # Annotations -![](/img/v1/annotated_graph1.png) Annotations provide a way to mark points on the graph with rich events. When you hover over an annotation you can get title, tags, and text information for the event. +![](/img/v1/annotated_graph1.png) + To add an annotation query click dashboard settings icon in top menu and select `Annotations` from the dropdown. This will open the `Annotations` edit view. Click the `Add` tab to add a new annotation query. +> Note: Annotations apply to all graphs in a given dashboard, not on a per-panel basis. + ## Graphite annotations Graphite supports two ways to query annotations. diff --git a/docs/sources/reference/dashboard.md b/docs/sources/reference/dashboard.md new file mode 100644 index 0000000000000..93adf5cd7898e --- /dev/null +++ b/docs/sources/reference/dashboard.md @@ -0,0 +1,422 @@ +---- +page_title: Dashboard JSON +page_description: Dashboard JSON Reference +page_keywords: grafana, dashboard, json, documentation +--- + +# Dashboard JSON + +## Overview + +A dashboard in Grafana is represented by a JSON object, which stores metadata of its dashboard. Dashboard metadata includes dashboard properties, metadata from rows, panels, template variables, panel queries, etc. + +To view the JSON of a dashboard, follow the steps mentioned below: + + 1. Go to a dashboard + 2. Click on `Manage dashboard` menu on the top navigation bar + 3. Select `View JSON` from the dropdown menu + +## JSON fields + +When a user creates a new dashboard, a new dashboard JSON object is initialized with the following fields: + +> Note: In the following JSON, id is shown as null which is the default value assigned to it until a dashboard is saved. Once a dashboard is saved, an integer value is assigned to the `id` field. 
+ +``` +{ + "id": null, + "title": "New dashboard", + "originalTitle": "New dashboard", + "tags": [], + "style": "dark", + "timezone": "browser", + "editable": true, + "hideControls": false, + "sharedCrosshair": false, + "rows": [], + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "time_options": [], + "refresh_intervals": [] + }, + "templating": { + "list": [] + }, + "annotations": { + "list": [] + }, + "schemaVersion": 7, + "version": 0, + "links": [] +} +``` +Each field in the dashboard JSON is explained below with its usage: + +| Name | Usage | +| ---- | ----- | +| **id** | unique dashboard id, an integer | +| **title** | current title of dashboard | +| **originalTitle** | title of dashboard when saved for the first time | +| **tags** | tags associated with dashboard, an array of strings | +| **style** | theme of dashboard, i.e. `dark` or `light` | +| **timezone** | timezone of dashboard, i.e. `utc` or `browser` | +| **editable** | whether a dashboard is editable or not | +| **hideControls** | whether row controls on the left in green are hidden or not | +| **sharedCrosshair** | TODO | +| **rows** | row metadata, see [rows section](/docs/sources/reference/dashboard.md/#rows) for details | +| **time** | time range for dashboard, i.e. last 6 hours, last 7 days, etc | +| **timepicker** | timepicker metadata, see [timepicker section](/docs/sources/reference/dashboard.md/#timepicker) for details | +| **templating** | templating metadata, see [templating section](/docs/sources/reference/dashboard.md/#templating) for details | +| **annotations** | annotations metadata, see [annotations section](/docs/sources/reference/dashboard.md/#annotations) for details | +| **schemaVersion** | TODO | +| **version** | TODO | +| **links** | TODO | + +### rows + +`rows` field consists of an array of JSON object representing each row in a dashboard, such as shown below: + +``` + "rows": [ + { + "collapse": false, + "editable": true, + "height": "200px", + "panels": [], + "title": "New row" + }, + { + "collapse": true, + "editable": true, + "height": "300px", + "panels": [], + "title": "New row" + } + ] +``` + +Usage of the fields is explained below: + +| Name | Usage | +| ---- | ----- | +| **collapse** | whether row is collapsed or not | +| **editable** | whether a row is editable or not | +| **height** | height of the row in pixels | +| **panels** | panels metadata, see [panels section](/docs/sources/reference/dashboard.md/#panels) for details | +| **title** | title of row | + +#### panels + +Panels are the building blocks a dashboard. It consists of datasource queries, type of graphs, aliases, etc. Panel JSON consists of an array of JSON objects, each representing a different panel in a row. Most of the fields are common for all panels but some fields depends on the panel type. 
Following is an example of panel JSON representing a `graph` panel type: + +``` +"panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": null, + "editable": true, + "error": false, + "fill": 0, + "grid": { + "leftLogBase": 1, + "leftMax": null, + "leftMin": null, + "rightLogBase": 1, + "rightMax": null, + "rightMin": null, + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "aggregator": "max", + "alias": "$tag_instance_id", + "currentTagKey": "", + "currentTagValue": "", + "downsampleAggregator": "avg", + "downsampleInterval": "", + "errors": {}, + "metric": "memory.percent-used", + "refId": "A", + "shouldComputeRate": false, + "tags": { + "app": "$app", + "env": "stage", + "instance_id": "*" + } + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "value_type": "cumulative" + }, + "type": "graph", + "x-axis": true, + "y-axis": true, + "y_formats": [ + "percent", + "short" + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": null, + "editable": true, + "error": false, + "fill": 0, + "grid": { + "leftLogBase": 1, + "leftMax": null, + "leftMin": null, + "rightLogBase": 1, + "rightMax": null, + "rightMin": null, + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "aggregator": "avg", + "alias": "$tag_instance_id", + "currentTagKey": "", + "currentTagValue": "", + "downsampleAggregator": "avg", + "downsampleInterval": "", + "errors": {}, + "metric": "memory.percent-cached", + "refId": "A", + "shouldComputeRate": false, + "tags": { + "app": "$app", + "env": "prod", + "instance_id": "*" + } + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Cached", + "tooltip": { + "shared": true, + "value_type": "cumulative" + }, + "type": "graph", + "x-axis": true, + "y-axis": true, + "y_formats": [ + "short", + "short" + ] + }, +``` + +Usage of each field is explained below: + +| Name | Usage | +| ---- | ----- | +| TODO | TODO | + +### timepicker + +Description: TODO + +``` +"timepicker": { + "collapse": false, + "enable": true, + "notice": false, + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "3h", + "6h", + "12h", + "24h", + "2d", + "3d", + "4d", + "7d", + "30d" + ], + "type": "timepicker" + } +``` + +Usage of the fields is explained below: + +| Name | Usage | +| ---- | ----- | +| **collapse** | whether timepicker is collapsed or not | +| 
**enable** | whether timepicker is enabled or not | +| **notice** | TODO | +| **now** | TODO | +| **refresh_intervals** | TODO | +| **status** | TODO | +| **time_options** | TODO | +| **type** | TODO | + +### templating + +`templating` fields contains array of template variables with their saved values along with some other metadata, for example: + +``` + "templating": { + "enable": true, + "list": [ + { + "allFormat": "wildcard", + "current": { + "tags": [], + "text": "prod", + "value": "prod" + }, + "datasource": null, + "includeAll": true, + "name": "env", + "options": [ + { + "selected": false, + "text": "All", + "value": "*" + }, + { + "selected": false, + "text": "stage", + "value": "stage" + }, + { + "selected": false, + "text": "test", + "value": "test" + } + ], + "query": "tag_values(cpu.utilization.average,env)", + "refresh": false, + "refresh": false, + "type": "query" + }, + { + "allFormat": "wildcard", + "current": { + "text": "apache", + "value": "apache" + }, + "datasource": null, + "includeAll": false, + "multi": false, + "multiFormat": "glob", + "name": "app", + "options": [ + { + "selected": true, + "text": "tomcat", + "value": "tomcat" + }, + { + "selected": false, + "text": "cassandra", + "value": "cassandra" + } + ], + "query": "tag_values(cpu.utilization.average,app)", + "refresh": false, + "regex": "", + "type": "query" + } + ] + } +``` + +Usage of the above mentioned fields in the templating section is explained below: + +| Name | Usage | +| ---- | ----- | +| **enable** | whether templating is enabled or not | +| **list** | an array of objects representing, each representing one template variable | +| **allFormat** | format to use while fetching all values from datasource, eg: `wildcard`, `glob`, `regex`, `pipe`, etc. | +| **current** | shows current selected variable text/value on the dashboard | +| **datasource** | shows datasource for the variables | +| **includeAll** | whether all value option is available or not | +| **multi** | whether multiple values can be selected or not from variable value list | +| **multiFormat** | format to use while fetching timeseries from datasource | +| **name** | name of variable | +| **options** | array of variable text/value pairs available for selection on dashboard | +| **query** | datasource query used to fetch values for a variable | +| **refresh** | TODO | +| **regex** | TODO | +| **type** | type of variable, i.e. `custom`, `query` or `interval` | + +### annotations + +TODO diff --git a/docs/sources/reference/dashlist.md b/docs/sources/reference/dashlist.md index 9a54a18e2887c..ea098541da2c7 100644 --- a/docs/sources/reference/dashlist.md +++ b/docs/sources/reference/dashlist.md @@ -7,14 +7,26 @@ page_keywords: grafana, dashlist, panel, documentation # Dashlist Panel ## Overview -![](/img/v2/dashboard_list_panel.png) -The dashboard list panel allows you to show a list of links to other dashboards. The list -can be based on a search query or dashboard tag query. You can also configure it to show your starred -dashboards. +The dashboard list panel allows you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, a search query and/or dashboard tags. + + + +> On each dashboard load, the dashlist panel will re-query the dashboard list, always providing the most up to date results. + +## Mode: Starred Dashboards + +The `starred` dashboard selection displays starred dashboards, up to the number specified in the `Limit Number to` field, in alphabetical order. 
On dashboard load, the dashlist panel will re-query the favorites to appear in dashboard list panel, always providing the most up to date results. + + + + +## Mode: Search Dashboards + +The panel may be configured to search by either string query or tag(s). On dashboard load, the dashlist panel will re-query the dashboard list, always providing the most up to date results. + +To configure dashboard list in this manner, select `search` from the Mode select box. When selected, the Search Options section will appear. -## Options -![](/img/v2/dashboard_list_panel_options.png) Name | Description ------------ | ------------- @@ -24,4 +36,17 @@ Tags | if in search mode specify dashboard tags to search for Limit number to | Specify the maximum number of dashboards +### Search by string + +To search by a string, enter a search query in the `Search Options: Query` field. Queries are case-insensitive, and partial values are accepted. + + +### Search by tag +To search by one or more tags, enter your selection in the `Search Options: Tags:` field. Note that existing tags will not appear as you type, and *are* case sensitive. To see a list of existing tags, you can always return to the dashboard, open the Dashboard Picker at the top and click `tags` link in the search bar. + + +> When multiple tags and strings appear, the dashboard list will display those matching ALL conditions. + + + diff --git a/docs/sources/reference/export_import.md b/docs/sources/reference/export_import.md index 8ae39cb0cf275..e83c68401d4e0 100644 --- a/docs/sources/reference/export_import.md +++ b/docs/sources/reference/export_import.md @@ -6,5 +6,80 @@ page_keywords: grafana, export, import, documentation # Export and Import -You find the import view in the bottom of the search dropdown. From this view you -can import local json files or migrate dashboards stored in Elasticsearch or InfluxDB. +## Exporting a dashboard + +Dashboards are exported in Grafana JSON format, and contain everything you need (layout, variables, styles, data sources, queries, etc)to import the dashboard at a later time. + +#### Export to file + +To export a dashboard, locate the settings menu within the desired dashboard and click the gear icon. The export option will always be available, and will open a browser save-as dialog window. + + + +#### Copy JSON + +The raw JSON may be accessed directly from within the interface and copy/pasted into an editor of your choice to be saved later. To view this JSON, locate the settings menu within the desired dashboard and click the gear icon. The View JSON option will always be available, and will open the raw JSON in a text area. To copy the entire JSON file, click into the text area, the select all `CTRL`+`A` (PC, Linux) or `⌘`+`A` (Mac). + + + +## Importing a dashboard + +Grafana 2.0 now has integrated dashboard storage engine that can be configured to use an internal sqlite3 database, MySQL, or Postgres. This eliminates the need to use Elasticsearch for dashboard storage for Graphite users. Grafana 2.0 does not support storing dashboards in InfluxDB. + +The import view can be found at the Dashboard Picker dropdown, next to the New Dashboard and Playlist buttons. + + + + +#### Import from a file + +To import a dashboard through a local JSON file, click the 'Choose file' button in the Import from File section. Note that JSON is not linted or validated prior during upload, so we recommend validating locally if you're editing. 
In a pinch, you can use http://jsonlint.com/, and if you are editing dashboard JSON frequently, there are linter plugins for popular text editors. + + +#### Importing dashboards from Elasticsearch + +Start by going to the `Data Sources` view (via the side menu), and make sure your Elasticsearch data source is added. Specify the Elasticsearch index name where your existing Grafana v1.x dashboards are stored (the default is `grafana-dash`). + +![](/img/v2/datasource_edit_elastic.jpg) + +#### Importing dashboards from InfluxDB + +Start by going to the `Data Sources` view (via the side menu), and make sure your InfluxDB data source is added. Specify the database name where your Grafana v1.x dashboards are stored, the default is `grafana`. + +### Import view + +In the Import view you find the section `Migrate dashboards`. Pick the data source you added (from Elasticsearch or InfluxDB), and click the `Import` button. + +![](/img/v2/migrate_dashboards.jpg) + +Your dashboards should be automatically imported into the Grafana 2.0 back-end. Dashboards will no longer be stored in your previous Elasticsearch or InfluxDB databases. + + +## Troubleshooting + +### Template variables could not be initialized. + +When importing a dashboard, keep an eye out for template variables in your JSON that may not exist in your instance of Grafana. For example, + + "templating": { + "list": [ + { + "allFormat": "glob", + "current": { + "tags": [], + "text": "google_com + monkey_id_au", + "value": [ + "google_com", + "monkey_id_au" + ] + }, + "datasource": null, + +To resolve this, remove any unnecessary JSON that may be specific to the instance you are exporting from. In this case, we can remove the entire "current" section entirely, and Grafana will populate default. + + "templating": { + "list": [ + { + "allFormat": "glob", + "datasource": null, + \ No newline at end of file diff --git a/docs/sources/reference/graph.md b/docs/sources/reference/graph.md index 9de23332a99a3..9c2ab63fbaa87 100644 --- a/docs/sources/reference/graph.md +++ b/docs/sources/reference/graph.md @@ -30,7 +30,7 @@ The drilldown section allows adding dynamic links to the panel that can link to or URLs Each link has a title, a type and params. A link can be either a ``dashboard`` or ``absolute`` links. -If it is a dashboard links, the `dashboard` value must be the name of a dashbaord. If it's an +If it is a dashboard links, the `dashboard` value must be the name of a dashboard. If it's an `absolute` link, the URL is the URL to link. ``params`` allows adding additional URL params to the links. The format is the ``name=value`` with @@ -127,7 +127,7 @@ If you have stack enabled you can select what the mouse hover feature should sho ### Rendering - ``Flot`` - Render the graphs in the browser using Flot (default) -- ``Graphite PNG`` - Render the graph on the server using graphites render API. +- ``Graphite PNG`` - Render the graph on the server using graphite's render API. 
### Tooltip diff --git a/docs/sources/reference/http_api.md b/docs/sources/reference/http_api.md index 609f12269ca24..15e7a06f3b82d 100644 --- a/docs/sources/reference/http_api.md +++ b/docs/sources/reference/http_api.md @@ -36,9 +36,9 @@ You use the token in all requests in the `Authorization` header, like this: **Example**: - GET http://your.grafana.com/api/dashboards/db/mydash HTTP/1.1 - Accept: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET http://your.grafana.com/api/dashboards/db/mydash HTTP/1.1 + Accept: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk The `Authorization` header value should be `Bearer `. @@ -52,26 +52,26 @@ Creates a new dashboard or updates an existing dashboard. **Example Request for new dashboard**: - POST /api/dashboards/db HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + POST /api/dashboards/db HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - { - "dashboard": { - "id": null, - "title": "Production Overview", - "tags": [ "templated" ], - "timezone": "browser", - "rows": [ - { - } - ], - "schemaVersion": 6, - "version": 0 - }, - "overwrite": false - } + { + "dashboard": { + "id": null, + "title": "Production Overview", + "tags": [ "templated" ], + "timezone": "browser", + "rows": [ + { + } + ], + "schemaVersion": 6, + "version": 0 + }, + "overwrite": false + } JSON Body schema: @@ -119,34 +119,34 @@ Will return the dashboard given the dashboard slug. Slug is the url friendly ver **Example Request**: - GET /api/dashboards/db/production-overview HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/dashboards/db/production-overview HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - { - "meta": { - "isStarred": false, - "slug": "production-overview" - }, - "model": { - "id": null, - "title": "Production Overview", - "tags": [ "templated" ], - "timezone": "browser", - "rows": [ - { - } - ] - "schemaVersion": 6, - "version": 0 - }, - } + { + "meta": { + "isStarred": false, + "slug": "production-overview" + }, + "model": { + "id": null, + "title": "Production Overview", + "tags": [ "templated" ], + "timezone": "browser", + "rows": [ + { + } + ], + "schemaVersion": 6, + "version": 0 + } + } ### Delete dashboard @@ -156,17 +156,17 @@ The above will delete the dashboard with the specified slug. The slug is the url **Example Request**: - DELETE /api/dashboards/db/test HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + DELETE /api/dashboards/db/test HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - {"title": "Test"} + {"title": "Test"} ### Gets the home dashboard @@ -176,53 +176,53 @@ Will return the home dashboard. 
**Example Request**: - GET /api/dashboards/home HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/dashboards/home HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "meta": { - "isHome":true, - "canSave":false, - "canEdit":false, - "canStar":false, - "slug":"", - "expires":"0001-01-01T00:00:00Z", - "created":"0001-01-01T00:00:00Z" - }, - "dashboard": { - "editable":false, - "hideControls":true, - "nav":[ - { - "enable":false, - "type":"timepicker" - } - ], - "rows": [ - { - - } - ], - "style":"dark", - "tags":[], - "templating":{ - "list":[ - ] - }, - "time":{ - }, - "timezone":"browser", - "title":"Home", - "version":5 - } + HTTP/1.1 200 + Content-Type: application/json + + { + "meta": { + "isHome":true, + "canSave":false, + "canEdit":false, + "canStar":false, + "slug":"", + "expires":"0001-01-01T00:00:00Z", + "created":"0001-01-01T00:00:00Z" + }, + "dashboard": { + "editable":false, + "hideControls":true, + "nav":[ + { + "enable":false, + "type":"timepicker" } + ], + "rows": [ + { + + } + ], + "style":"dark", + "tags":[], + "templating":{ + "list":[ + ] + }, + "time":{ + }, + "timezone":"browser", + "title":"Home", + "version":5 + } + } ### Tags for Dashboard @@ -233,27 +233,27 @@ Get all tabs of dashboards **Example Request**: - GET /api/dashboards/home HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/dashboards/home HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - [ - { - "term":"tag1", - "count":1 - }, - { - "term":"tag2", - "count":4 - } - ] - + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "term":"tag1", + "count":1 + }, + { + "term":"tag2", + "count":4 + } + ] + ### Dashboard from JSON file `GET /file/:file` @@ -271,27 +271,27 @@ Status Codes: **Example Request**: - GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - [ - { - "id":1, - "title":"Production Overview", - "uri":"db/production-overview", - "type":"dash-db", - "tags":[], - "isStarred":false - } - ] - + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "id":1, + "title":"Production Overview", + "uri":"db/production-overview", + "type":"dash-db", + "tags":[], + "isStarred":false + } + ] + ## Data sources ### Get all datasources @@ -300,34 +300,34 @@ Status Codes: **Example Request**: - GET /api/datasources HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/datasources HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 
- Content-Type: application/json - - [ - { - "id":1, - "orgId":1, - "name":"datasource_elastic", - "type":"elasticsearch", - "access":"proxy", - "url":"http://mydatasource.com", - "password":"", - "user":"", - "database":"grafana-dash", - "basicAuth":false, - "basicAuthUser":"", - "basicAuthPassword":"", - "isDefault":false, - "jsonData":null - } - ] + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "id":1, + "orgId":1, + "name":"datasource_elastic", + "type":"elasticsearch", + "access":"proxy", + "url":"http://mydatasource.com", + "password":"", + "user":"", + "database":"grafana-dash", + "basicAuth":false, + "basicAuthUser":"", + "basicAuthPassword":"", + "isDefault":false, + "jsonData":null + } + ] ### Get a single data sources by Id @@ -335,32 +335,32 @@ Status Codes: **Example Request**: - GET /api/datasources/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/datasources/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "id":1, - "orgId":1, - "name":"test_datasource", - "type":"graphite", - "access":"proxy", - "url":"http://mydatasource.com", - "password":"", - "user":"", - "database":"", - "basicAuth":false, - "basicAuthUser":"", - "basicAuthPassword":"", - "isDefault":false, - "jsonData":null - } + HTTP/1.1 200 + Content-Type: application/json + + { + "id":1, + "orgId":1, + "name":"test_datasource", + "type":"graphite", + "access":"proxy", + "url":"http://mydatasource.com", + "password":"", + "user":"", + "database":"", + "basicAuth":false, + "basicAuthUser":"", + "basicAuthPassword":"", + "isDefault":false, + "jsonData":null + } ### Create data source @@ -368,26 +368,26 @@ Status Codes: **Example Request**: - POST /api/datasources HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + POST /api/datasources HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - { - "name":"test_datasource", - "type":"graphite", - "url":"http://mydatasource.com", - "access":"proxy", - "basicAuth":false - } + { + "name":"test_datasource", + "type":"graphite", + "url":"http://mydatasource.com", + "access":"proxy", + "basicAuth":false + } **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - {"id":1,"message":"Datasource added"} + {"id":1,"message":"Datasource added"} ### Update an existing data source @@ -395,34 +395,34 @@ Status Codes: **Example Request**: - PUT /api/datasources/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - - { - "id":1, - "orgId":1, - "name":"test_datasource", - "type":"graphite", - "access":"proxy", - "url":"http://mydatasource.com", - "password":"", - "user":"", - "database":"", - "basicAuth":true, - "basicAuthUser":"basicuser", - "basicAuthPassword":"basicuser", - "isDefault":false, - "jsonData":null - } + PUT /api/datasources/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "id":1, + "orgId":1, + "name":"test_datasource", + "type":"graphite", + 
"access":"proxy", + "url":"http://mydatasource.com", + "password":"", + "user":"", + "database":"", + "basicAuth":true, + "basicAuthUser":"basicuser", + "basicAuthPassword":"basicuser", + "isDefault":false, + "jsonData":null + } **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - {"message":"Datasource updated"} + {"message":"Datasource updated"} ### Delete an existing data source @@ -430,17 +430,17 @@ Status Codes: **Example Request**: - DELETE /api/datasources/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + DELETE /api/datasources/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - {"message":"Data source deleted"} + {"message":"Data source deleted"} ### Available data source types @@ -448,29 +448,29 @@ Status Codes: **Example Request**: - GET /api/datasources/plugins HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + GET /api/datasources/plugins HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "grafana":{ - "metrics":true,"module":"plugins/datasource/grafana/datasource", - "name":"Grafana (for testing)", - "partials":{ - "query":"app/plugins/datasource/grafana/partials/query.editor.html" - }, - "pluginType":"datasource", - "serviceName":"GrafanaDatasource", - "type":"grafana" - } - } - + HTTP/1.1 200 + Content-Type: application/json + + { + "grafana":{ + "metrics":true,"module":"plugins/datasource/grafana/datasource", + "name":"Grafana (for testing)", + "partials":{ + "query":"app/plugins/datasource/grafana/partials/query.editor.html" + }, + "pluginType":"datasource", + "serviceName":"GrafanaDatasource", + "type":"grafana" + } + } + ## Data source proxy calls `GET /api/datasources/proxy/:datasourceId/*` @@ -485,20 +485,20 @@ Proxies all calls to the actual datasource. **Example Request**: - GET /api/org HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/org HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - { - "id":1, - "name":"Main Org." - } + { + "id":1, + "name":"Main Org." + } ### Update current Organisation @@ -506,49 +506,49 @@ Proxies all calls to the actual datasource. **Example Request**: - PUT /api/org HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + PUT /api/org HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "name":"Main Org." + } + - { - "name":"Main Org." 
- } - - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Organization updated"} + - {"message":"Organization updated"} - - ### Get all users within the actual organisation `GET /api/org/users` **Example Request**: - GET /api/org/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/org/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - [ - { - "orgId":1, - "userId":1, - "email":"admin@mygraf.com", - "login":"admin", - "role":"Admin" - } - ] + [ + { + "orgId":1, + "userId":1, + "email":"admin@mygraf.com", + "login":"admin", + "role":"Admin" + } + ] ### Add a new user to the actual organisation @@ -558,65 +558,65 @@ Adds a global user to the actual organisation. **Example Request**: - POST /api/org/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - - { - "role": "Admin", - "loginOrEmail": "admin" - } - - + POST /api/org/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "role": "Admin", + "loginOrEmail": "admin" + } + + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User added to organization"} - + HTTP/1.1 200 + Content-Type: application/json + + {"message":"User added to organization"} + ### Updates the given user `PATCH /api/org/users/:userId` **Example Request**: - PATCH /api/org/users/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + PATCH /api/org/users/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "role": "Viewer", + } + - { - "role": "Viewer", - } - - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"Organization user updated"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Organization user updated"} + - ### Delete user in actual organisation `DELETE /api/org/users/:userId` **Example Request**: - DELETE /api/org/users/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + DELETE /api/org/users/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User removed from organization"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"User removed from organization"} ## Organisations @@ -627,22 +627,22 @@ Adds a global user to the actual organisation. 
**Example Request**: - GET /api/orgs HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/orgs HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - [ - { - "id":1, - "name":"Main Org." - } - ] + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "id":1, + "name":"Main Org." + } + ] ### Update Organisation @@ -652,22 +652,22 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - PUT /api/orgs/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + PUT /api/orgs/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "name":"Main Org 2." + } + - { - "name":"Main Org 2." - } - - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"Organization updated"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Organization updated"} ### Get Users in Organisation @@ -675,24 +675,24 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - GET /api/orgs/1/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/orgs/1/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - [ - { - "orgId":1, - "userId":1, - "email":"admin@mygraf.com", - "login":"admin", - "role":"Admin" - } - ] + HTTP/1.1 200 + Content-Type: application/json + [ + { + "orgId":1, + "userId":1, + "email":"admin@mygraf.com", + "login":"admin", + "role":"Admin" + } + ] ### Add User in Organisation @@ -700,22 +700,22 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - POST /api/orgs/1/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + POST /api/orgs/1/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "loginOrEmail":"user", + "role":"Viewer" + } - { - "loginOrEmail":"user", - "role":"Viewer" - } - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User added to organization"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"User added to organization"} ### Update Users in Organisation @@ -723,21 +723,21 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - PATCH /api/orgs/1/users/2 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + PATCH /api/orgs/1/users/2 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "role":"Admin" + } - { - "role":"Admin" - } - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - 
{"message":"Organization user updated"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Organization user updated"} ### Delete User in Organisation @@ -745,17 +745,17 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - DELETE /api/orgs/1/users/2 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + DELETE /api/orgs/1/users/2 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User removed from organization"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"User removed from organization"} ## Users @@ -765,31 +765,32 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - GET /api/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - [ - { - "id": 1, - "name": "Admin", - "login": "admin", - "email": "admin@mygraf.com", - "isAdmin": true - }, - { - "id": 2, - "name": "User", - "login": "user", - "email": "user@mygraf.com" - "isAdmin": false - } - ] + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "id": 1, + "name": "Admin", + "login": "admin", + "email": "admin@mygraf.com", + "isAdmin": true + }, + { + "id": 2, + "name": "User", + "login": "user", + "email": "user@mygraf.com", + "isAdmin": false + } + ] ### Get single user by Id @@ -797,24 +798,24 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - GET /api/users/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/users/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "email": "user@mygraf.com" - "name": "admin", - "login": "admin", - "theme": "light", - "orgId": 1, - "isGrafanaAdmin": true - } + HTTP/1.1 200 + Content-Type: application/json + + { + "email": "user@mygraf.com" + "name": "admin", + "login": "admin", + "theme": "light", + "orgId": 1, + "isGrafanaAdmin": true + } ### User Update @@ -822,23 +823,24 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - PUT /api/users/2 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - - { - "email":"user@mygraf.com", - "name":"User2", - "login":"user", - "theme":"light" - } + PUT /api/users/2 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "email":"user@mygraf.com", + "name":"User2", + "login":"user", + "theme":"light" + } + **Example Response**: - - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User updated"} + + HTTP/1.1 200 + Content-Type: application/json + + 
{"message":"User updated"} ### Get Organisations for user @@ -847,23 +849,23 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - GET /api/users/1/orgs HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/users/1/orgs HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json + HTTP/1.1 200 + Content-Type: application/json - [ - { - "orgId":1, - "name":"Main Org.", - "role":"Admin" - } - ] + [ + { + "orgId":1, + "name":"Main Org.", + "role":"Admin" + } + ] ## User @@ -873,24 +875,24 @@ Update Organisation, fields *Adress 1*, *Adress 2*, *City* are not implemented y **Example Request**: - GET /api/user HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + GET /api/user HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "email":"admin@mygraf.com", - "name":"Admin", - "login":"admin", - "theme":"light", - "orgId":1, - "isGrafanaAdmin":true - } + HTTP/1.1 200 + Content-Type: application/json + + { + "email":"admin@mygraf.com", + "name":"Admin", + "login":"admin", + "theme":"light", + "orgId":1, + "isGrafanaAdmin":true + } ### Change Password @@ -900,22 +902,23 @@ Changes the password for the user **Example Request**: - PUT /api/user/password HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + PUT /api/user/password HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "oldPassword": "old_password", + "newPassword": "new_password", + "confirmNew": "confirm_new_password" + } - { - "password": ""new_password" - } - - **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User password updated"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"User password changed"} ### Switch user context @@ -925,17 +928,17 @@ Switch user context to the given organisation. **Example Request**: - POST /api/user/using/2 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + POST /api/user/using/2 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"Active organization changed"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Active organization changed"} ### Organisations of the actual User @@ -945,23 +948,23 @@ Return a list of all organisations of the current user. 
**Example Request**: - GET /api/user/orgs HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + GET /api/user/orgs HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - [ - { - "orgId":1, - "name":"Main Org.", - "role":"Admin" - } - ] + HTTP/1.1 200 + Content-Type: application/json + + [ + { + "orgId":1, + "name":"Main Org.", + "role":"Admin" + } + ] ### Star a dashboard @@ -971,17 +974,17 @@ Stars the given Dashboard for the actual user. **Example Request**: - POST /api/user/stars/dashboard/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + POST /api/user/stars/dashboard/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"Dashboard starred!"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Dashboard starred!"} ### Unstar a dashboard @@ -991,17 +994,17 @@ Deletes the starring of the given Dashboard for the actual user. **Example Request**: - DELETE /api/user/stars/dashboard/1 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + DELETE /api/user/stars/dashboard/1 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"Dashboard unstarred"} + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Dashboard unstarred"} ## Snapshots @@ -1012,112 +1015,111 @@ Deletes the starring of the given Dashboard for the actual user. 
**Example Request**: - POST /api/snapshots HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - - { - "dashboard": { - "editable":false, - "hideControls":true, - "nav":[ - { - "enable":false, - "type":"timepicker" - } - ], - "rows": [ - { - - } - ], - "style":"dark", - "tags":[], - "templating":{ - "list":[ - ] - }, - "time":{ - }, - "timezone":"browser", - "title":"Home", - "version":5 - } - "expires": 3600 - } - + POST /api/snapshots HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "dashboard": { + "editable":false, + "hideControls":true, + "nav":[ + { + "enable":false, + "type":"timepicker" + } + ], + "rows": [ + { + + } + ], + "style":"dark", + "tags":[], + "templating":{ + "list":[ + ] + }, + "time":{ + }, + "timezone":"browser", + "title":"Home", + "version":5 + }, + "expires": 3600 + } + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - { - "deleteKey":"XXXXXXX", - "deleteUrl":"myurl/dashboard/snapshot/XXXXXXX", - "key":"YYYYYYY", - "url":"myurl/dashboard/snapshot/YYYYYYY" - } + HTTP/1.1 200 + Content-Type: application/json + { + "deleteKey":"XXXXXXX", + "deleteUrl":"myurl/dashboard/snapshot/XXXXXXX", + "key":"YYYYYYY", + "url":"myurl/dashboard/snapshot/YYYYYYY" + } Keys: - **deleteKey** – Key generated to delete the snapshot - **key** – Key generated to share the dashboard - + ### Get Snapshot by Id - + `GET /api/snapshots/:key` **Example Request**: - GET /api/snapshots/YYYYYYY HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + GET /api/snapshots/YYYYYYY HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "meta":{ - "isSnapshot":true, - "type":"snapshot", - "canSave":false, - "canEdit":false, - "canStar":false, - "slug":"", - "expires":"2200-13-32T25:23:23+02:00", - "created":"2200-13-32T28:24:23+02:00"}, - - { - "dashboard": { - "editable":false, - "hideControls":true, - "nav":[ - { - "enable":false, - "type":"timepicker" - } - ], - "rows": [ - { - - } - ], - "style":"dark", - "tags":[], - "templating":{ - "list":[ - ] - }, - "time":{ - }, - "timezone":"browser", - "title":"Home", - "version":5 - } - } + HTTP/1.1 200 + Content-Type: application/json + + { + "meta":{ + "isSnapshot":true, + "type":"snapshot", + "canSave":false, + "canEdit":false, + "canStar":false, + "slug":"", + "expires":"2200-13-32T25:23:23+02:00", + "created":"2200-13-32T28:24:23+02:00" + }, + "dashboard": { + "editable":false, + "hideControls":true, + "nav":[ + { + "enable":false, + "type":"timepicker" + } + ], + "rows": [ + { + + } + ], + "style":"dark", + "tags":[], + "templating":{ + "list":[ + ] + }, + "time":{ + }, + "timezone":"browser", + "title":"Home", + "version":5 + } + } ### Delete Snapshot by Id @@ -1125,18 +1127,18 @@ Keys: **Example Request**: - GET /api/snapshots/YYYYYYY HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/snapshots/YYYYYYY HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 
- Content-Type: application/json - - {"message":"Snapshot deleted. It might take an hour before it's cleared from a CDN cache."} - + HTTP/1.1 200 + Content-Type: application/json + + {"message":"Snapshot deleted. It might take an hour before it's cleared from a CDN cache."} + ## Frontend Settings @@ -1146,45 +1148,44 @@ Keys: **Example Request**: - GET /api/frontend/settings HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/frontend/settings HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "allowOrgCreate":true, - "appSubUrl":"", - "buildInfo":{ - "buildstamp":xxxxxx, - "commit":"vyyyy", - "version":"zzzzz" - }, - "datasources":{ - "datasourcename":{ - "index":"grafana-dash", - "meta":{ - "annotations":true, - "module":"plugins/datasource/grafana/datasource", - "name":"Grafana", - "partials":{ - "annotations":"app/plugins/datasource/grafana/partials/annotations.editor.html", - "config":"app/plugins/datasource/grafana/partials/config.html" - }, - "pluginType":"datasource", - "serviceName":"Grafana", - "type":"grafanasearch" - } - } - } - - defaultDatasource: "Grafana" - } - + HTTP/1.1 200 + Content-Type: application/json + + { + "allowOrgCreate":true, + "appSubUrl":"", + "buildInfo":{ + "buildstamp":xxxxxx, + "commit":"vyyyy", + "version":"zzzzz" + }, + "datasources":{ + "datasourcename":{ + "index":"grafana-dash", + "meta":{ + "annotations":true, + "module":"plugins/datasource/grafana/datasource", + "name":"Grafana", + "partials":{ + "annotations":"app/plugins/datasource/grafana/partials/annotations.editor.html", + "config":"app/plugins/datasource/grafana/partials/config.html" + }, + "pluginType":"datasource", + "serviceName":"Grafana", + "type":"grafanasearch" + } + } + }, + "defaultDatasource": "Grafana" + } + ## Login ### Renew session based on remember cookie @@ -1193,18 +1194,18 @@ Keys: **Example Request**: - GET /api/login/ping HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/login/ping HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message": "Logged in"} - + HTTP/1.1 200 + Content-Type: application/json + + {"message": "Logged in"} + ## Admin ### Settings @@ -1213,155 +1214,155 @@ Keys: **Example Request**: - GET /api/admin/settings - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + GET /api/admin/settings + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - { - "DEFAULT": - { - "app_mode":"production"}, - "analytics": - { - "google_analytics_ua_id":"", - "reporting_enabled":"false" - }, - "auth.anonymous":{ - "enabled":"true", - "org_name":"Main Org.", - "org_role":"Viewer" - }, - "auth.basic":{ - "enabled":"false" - }, - "auth.github":{ - "allow_sign_up":"false", - "allowed_domains":"", - "allowed_organizations":"", - "api_url":"https://api.github.com/user", - 
"auth_url":"https://github.com/login/oauth/authorize", - "client_id":"some_id", - "client_secret":"************", - "enabled":"false", - "scopes":"user:email", - "team_ids":"", - "token_url":"https://github.com/login/oauth/access_token" - }, - "auth.google":{ - "allow_sign_up":"false","allowed_domains":"", - "api_url":"https://www.googleapis.com/oauth2/v1/userinfo", - "auth_url":"https://accounts.google.com/o/oauth2/auth", - "client_id":"some_client_id", - "client_secret":"************", - "enabled":"false", - "scopes":"https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email", - "token_url":"https://accounts.google.com/o/oauth2/token" - }, - "auth.ldap":{ - "config_file":"/etc/grafana/ldap.toml", - "enabled":"false" - }, - "auth.proxy":{ - "auto_sign_up":"true", - "enabled":"false", - "header_name":"X-WEBAUTH-USER", - "header_property":"username" - }, - "dashboards.json":{ - "enabled":"false", - "path":"/var/lib/grafana/dashboards" - }, - "database":{ - "host":"127.0.0.1:0000", - "name":"grafana", - "password":"************", - "path":"grafana.db", - "ssl_mode":"disable", - "type":"sqlite3", - "user":"root" - }, - "emails":{ - "templates_pattern":"emails/*.html", - "welcome_email_on_sign_up":"false" - }, - "event_publisher":{ - "enabled":"false", - "exchange":"grafana_events", - "rabbitmq_url":"amqp://localhost/" - }, - "log":{ - "buffer_len":"10000", - "level":"Info", - "mode":"file" - }, - "log.console":{ - "level":"" - }, - "log.file":{ - "daily_rotate":"true", - "file_name":"", - "level":"", - "log_rotate":"true", - "max_days":"7", - "max_lines":"1000000", - "max_lines_shift":"28", - "max_size_shift":"" - }, - "paths":{ - "data":"/tsdb/grafana", - "logs":"/logs/apps/grafana"}, - "security":{ - "admin_password":"************", - "admin_user":"admin", - "cookie_remember_name":"grafana_remember", - "cookie_username":"grafana_user", - "disable_gravatar":"false", - "login_remember_days":"7", - "secret_key":"************" - }, - "server":{ - "cert_file":"", - "cert_key":"", - "domain":"mygraf.com", - "enable_gzip":"false", - "enforce_domain":"false", - "http_addr":"127.0.0.1", - "http_port":"0000", - "protocol":"http", - "root_url":"%(protocol)s://%(domain)s:%(http_port)s/", - "router_logging":"true", - "static_root_path":"public" - }, - "session":{ - "cookie_name":"grafana_sess", - "cookie_secure":"false", - "gc_interval_time":"", - "provider":"file", - "provider_config":"sessions", - "session_life_time":"86400" - }, - "smtp":{ - "cert_file":"", - "enabled":"false", - "from_address":"admin@grafana.localhost", - "host":"localhost:25", - "key_file":"", - "password":"************", - "skip_verify":"false", - "user":""}, - "users":{ - "allow_org_create":"true", - "allow_sign_up":"false", - "auto_assign_org":"true", - "auto_assign_org_role":"Viewer" - } - } + HTTP/1.1 200 + Content-Type: application/json + + { + "DEFAULT": + { + "app_mode":"production"}, + "analytics": + { + "google_analytics_ua_id":"", + "reporting_enabled":"false" + }, + "auth.anonymous":{ + "enabled":"true", + "org_name":"Main Org.", + "org_role":"Viewer" + }, + "auth.basic":{ + "enabled":"false" + }, + "auth.github":{ + "allow_sign_up":"false", + "allowed_domains":"", + "allowed_organizations":"", + "api_url":"https://api.github.com/user", + "auth_url":"https://github.com/login/oauth/authorize", + "client_id":"some_id", + "client_secret":"************", + "enabled":"false", + "scopes":"user:email", + "team_ids":"", + 
"token_url":"https://github.com/login/oauth/access_token" + }, + "auth.google":{ + "allow_sign_up":"false","allowed_domains":"", + "api_url":"https://www.googleapis.com/oauth2/v1/userinfo", + "auth_url":"https://accounts.google.com/o/oauth2/auth", + "client_id":"some_client_id", + "client_secret":"************", + "enabled":"false", + "scopes":"https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email", + "token_url":"https://accounts.google.com/o/oauth2/token" + }, + "auth.ldap":{ + "config_file":"/etc/grafana/ldap.toml", + "enabled":"false" + }, + "auth.proxy":{ + "auto_sign_up":"true", + "enabled":"false", + "header_name":"X-WEBAUTH-USER", + "header_property":"username" + }, + "dashboards.json":{ + "enabled":"false", + "path":"/var/lib/grafana/dashboards" + }, + "database":{ + "host":"127.0.0.1:0000", + "name":"grafana", + "password":"************", + "path":"grafana.db", + "ssl_mode":"disable", + "type":"sqlite3", + "user":"root" + }, + "emails":{ + "templates_pattern":"emails/*.html", + "welcome_email_on_sign_up":"false" + }, + "event_publisher":{ + "enabled":"false", + "exchange":"grafana_events", + "rabbitmq_url":"amqp://localhost/" + }, + "log":{ + "buffer_len":"10000", + "level":"Info", + "mode":"file" + }, + "log.console":{ + "level":"" + }, + "log.file":{ + "daily_rotate":"true", + "file_name":"", + "level":"", + "log_rotate":"true", + "max_days":"7", + "max_lines":"1000000", + "max_lines_shift":"28", + "max_size_shift":"" + }, + "paths":{ + "data":"/tsdb/grafana", + "logs":"/logs/apps/grafana"}, + "security":{ + "admin_password":"************", + "admin_user":"admin", + "cookie_remember_name":"grafana_remember", + "cookie_username":"grafana_user", + "disable_gravatar":"false", + "login_remember_days":"7", + "secret_key":"************" + }, + "server":{ + "cert_file":"", + "cert_key":"", + "domain":"mygraf.com", + "enable_gzip":"false", + "enforce_domain":"false", + "http_addr":"127.0.0.1", + "http_port":"0000", + "protocol":"http", + "root_url":"%(protocol)s://%(domain)s:%(http_port)s/", + "router_logging":"true", + "static_root_path":"public" + }, + "session":{ + "cookie_name":"grafana_sess", + "cookie_secure":"false", + "gc_interval_time":"", + "provider":"file", + "provider_config":"sessions", + "session_life_time":"86400" + }, + "smtp":{ + "cert_file":"", + "enabled":"false", + "from_address":"admin@grafana.localhost", + "host":"localhost:25", + "key_file":"", + "password":"************", + "skip_verify":"false", + "user":""}, + "users":{ + "allow_org_create":"true", + "allow_sign_up":"false", + "auto_assign_org":"true", + "auto_assign_org_role":"Viewer" + } + } ### Global Users @@ -1371,24 +1372,24 @@ Create new user **Example Request**: - POST /api/admin/users HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - - { - "name":"User", - "email":"user@graf.com", - "login":"user", - "password":"userpassword" - } + POST /api/admin/users HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + + { + "name":"User", + "email":"user@graf.com", + "login":"user", + "password":"userpassword" + } **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"message":"User created"} + HTTP/1.1 200 + Content-Type: application/json + + {"id":5,"message":"User created"} ### Password for User @@ -1398,17 +1399,17 @@ Change password for specific user 
**Example Request**: - PUT /api/admin/users/2/password HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + PUT /api/admin/users/2/password HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {"password":"userpassword"} + HTTP/1.1 200 + Content-Type: application/json + + {"password":"userpassword"} ### Permissions @@ -1416,17 +1417,17 @@ Change password for specific user **Example Request**: - PUT /api/admin/users/2/permissions HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk - + PUT /api/admin/users/2/permissions HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {message: "User permissions updated"} + HTTP/1.1 200 + Content-Type: application/json + + {message: "User permissions updated"} ### Delete global User @@ -1434,14 +1435,14 @@ Change password for specific user **Example Request**: - DELETE /api/admin/users/2 HTTP/1.1 - Accept: application/json - Content-Type: application/json - Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk + DELETE /api/admin/users/2 HTTP/1.1 + Accept: application/json + Content-Type: application/json + Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk **Example Response**: - HTTP/1.1 200 - Content-Type: application/json - - {message: "User deleted"} + HTTP/1.1 200 + Content-Type: application/json + + {message: "User deleted"} diff --git a/docs/sources/reference/keyboard_shortcuts.md b/docs/sources/reference/keyboard_shortcuts.md new file mode 100644 index 0000000000000..c9da3d5c6a131 --- /dev/null +++ b/docs/sources/reference/keyboard_shortcuts.md @@ -0,0 +1,30 @@ +page_title: Keyboard Shortcuts +page_description: Keyboard Shortcuts for Grafana +page_keywords: grafana, export, import, documentation +--- + +# Keyboard Shortcuts + +No mouse? No problem. Grafana has extensive keyboard shortcuts to allow you to navigate throughout the interface. This comes in especially handy when dealing with dealing with single-purpose machines powering on-wall displays that may not have a mouse available. + +## Dashboard Keyboard Shortcuts + +Press `Shift`+`?` to open the keyboard shortcut dialog from anywhere within the dashboard views. + + + + +|Shortcut|Action| +|---|---| +|`Esc`|Exit fullscreen edit/view mode, close search or any editor view| +|`F`|Open dashboard search view (also contains import/playlist controls)| +|`CTRL`+`S`|Save dashboard| +|`CTRL`+`H`|Hide row controls| +|`CTRL`+`Z`|Zoom out| +|`CTRL`+`R`|Refresh (Fetches new data and rerenders panels)| +|`CTRL`+`O`|Enable/Disable shared graph crosshair| + + +**Note**: Grafana keyboard shortcuts are the same across operating system. + +Have a suggestion for a new keyboard shortcut? Let us know. 
diff --git a/docs/sources/reference/playlist.md b/docs/sources/reference/playlist.md index 29c7717939c16..b87ebc25be03a 100644 --- a/docs/sources/reference/playlist.md +++ b/docs/sources/reference/playlist.md @@ -4,15 +4,17 @@ page_description: Playlist guide for Grafana page_keywords: grafana, playlist, documentation --- -## About the Playlist +# Playlist -The Playlist is a special type of Dashboard that rotates through a particular list of two or more Dashboards. They can be a great way to build situational awareness or just show off your metrics to your team or visitors. Since Dashboards in Grafana automatically scale to any resolution they're perfect for big screens! +The Playlist is a special type of Dashboard that rotates through a list of Dashboards. A Playlist can be a great way to build situational awareness, or just show off your metrics to your team or visitors. + +Since Grafana automatically scales Dashboards to any resolution they're perfect for big screens! ## Configuring the Playlist The Playlist can be accessed from the main Dashboard picker. Click the 'Playlist' button at the bottom of the picker to access the Playlist functionality. -![](/img/v2/dashboard_search.jpg) + Since the Playlist is basically a list of Dashboards, ensure that all the Dashboards you want to appear in your Playlist are added here. You can search Dashboards by name (or use a regular expression). @@ -24,9 +26,9 @@ You can configure a time interval for Grafana to wait on a particular Dashboard ## Starting and controlling the Playlist -To start the Playlist, click the green "Start" button +To start the Playlist, click the green "Start" button -Playlists can also be manually controlled utilizing the Playlist controls at the top of screen when in Playlist mode. +Playlists can also be manually controlled utilizing the Playlist controls at the top of screen when in Playlist mode. Click the stop button to stop the Playlist, and exit to the current Dashboard. Click the next button to advance to the next Dashboard in the Playlist. diff --git a/docs/sources/reference/search.md b/docs/sources/reference/search.md index 159fe4056e8fe..e2d6647229ca9 100644 --- a/docs/sources/reference/search.md +++ b/docs/sources/reference/search.md @@ -1,34 +1,54 @@ ---- -page_title: Search guide -page_description: Search guide -page_keywords: grafana, time range, guide, documentation +page_title: Dashboard Search +page_description: Dashboard Search in Grafana +page_keywords: grafana, search, guide, documentation --- -# Search Guide +# Dashboard Search -To search and load dashboards click the open folder icon in the header or use the shortcut CTRL+F. +Dashboards can be searched by the dashboard name, filtered by one (or many) tags or filtered by starred status. The dashboard search is accessed through the dashboard picker, available in the dashboard top nav area. -## Tags -![](/img/v1/dashboards_tags2_search.png) + -Click on any dashboard or use the down arrow key to navigate the search result and hit enter to open the selected dashboard. +1. `Dashboard Picker`: The Dashboard Picker is your primary navigation tool to move between dashboards. It is present on all dashboards, and open the Dashboard Search. The dashboard picker also doubles as the title of the current dashboard. +2. `Search Bar`: The search bar allows you to enter any string and search both database and file based dashboards in real-time. +3. `Starred`: The starred link allows you to filter the list to display only starred dashboards. +4. 
`Tags`: The tags filter allows you to filter the list by dashboard tags. -If you have a lot of dashboards use tags to organize them. You can add tags in the dashboards settings modal. +When using only a keyboard, you can use your keyboard arrow keys to navigate the results, hit enter to open the selected dashboard. -![](/img/v1/dashboards_tags1.png) +## Find by dashboard name -After you have added tags you can now view all available tags in the search popup. Click the tags link to the right in the search box, or just hit tab and then enter. You should now see a list of all tags. You can filter the tag list by continue writing in the search box, for example: "tags!:mongodb". + -![](/img/v1/dashboards_tags3_search.png) +To search and load dashboards click the open folder icon in the header or use the shortcut `CTRL`+`F`. Begin typing any part of the desired dashboard names. Search will return results for for any partial string match in real-time, as you type. -Click on any tag in the tag list to show dashboards with that tag (or just down arrow key to select a tag then enter key). +Dashboard search is: +- Real-time +- *Not* case sensitive +- Functional across stored *and* file based dashboards. -## Keyboard navigation +## Filter by Tag(s) -While the search input has focus you can use your keyboard arrow keys to navigate the results, hit enter to open the selected dashboard. +Tags are a great way to organize your dashboards, especially as the number of dashboards grow. Tags can be added and managed in the dashboard `Settings`. +To filter the dashboard list by tag, click on any tag appearing in the right column. The list may be further filtered by clicking on additional tags: + +Alternately, to see a list of all available tags, click the tags link in the search bar. All tags will be shown, and when a tag is selected, the dashboard search will be instantly filtered: + +When using only a keyboard: `tab` to focus on the *tags* link, `▼` down arrow key to find a tag and select with the `Enter` key. +**Note**: When multiple tags are selected, Grafana will show dashboards that include **all**. + + +## Filter by Starred + +Starring is a great way to organize and find commonly used dashboards. To show only starred dashboards in the list, click the *starred* link in the search bar: + + + +When using only a keyboard: `tab` to focus on the *stars* link, `▼` down arrow key to find a tag and select with the `Enter` key. \ No newline at end of file diff --git a/docs/sources/reference/sharing.md b/docs/sources/reference/sharing.md index 16ddcd8f01b46..c20f4e5f67c9a 100644 --- a/docs/sources/reference/sharing.md +++ b/docs/sources/reference/sharing.md @@ -5,7 +5,7 @@ page_keywords: grafana, sharing, guide, documentation --- # Sharing features -Grafana provides a number of ways to share a dashboard or a specfic panel to other users within your +Grafana provides a number of ways to share a dashboard or a specific panel to other users within your organization. It also provides ways to publish interactive snapshots that can be accessed by external partners. ## Share dashboard @@ -30,7 +30,8 @@ after a certain time period. ## Share Panel Click a panel title to open the panel menu, then click share in the panel menu to open the Share Panel dialog. Here you have access to a link that will take you to exactly this panel with the current time range and selected template variables. -You also get a link to service side rendered PNG of the panel. Useful if you want to shara image of the panel. 
+You also get a link to service side rendered PNG of the panel. Useful if you want to share an image of the panel. +Please note that for OSX and Windows, you will need to ensure that a `phantomjs` binary is available under `vendor/phantomjs/phantomjs`. For Linux, a `phantomjs` binary is included - however, you should ensure that any requisite libraries (e.g. libfontconfig) are available. ### Embed Panel You can embed a panel using an iframe on another web site. This tab will show you the html that you need to use. @@ -42,4 +43,4 @@ Example: ``` Below there should be an interactive Grafana graph embedded in an iframe: - + diff --git a/docs/sources/reference/singlestat.md b/docs/sources/reference/singlestat.md index 40a3551a4e0b8..91724e8988293 100644 --- a/docs/sources/reference/singlestat.md +++ b/docs/sources/reference/singlestat.md @@ -8,7 +8,7 @@ page_keywords: grafana, singlestat, panel, documentation ![](/img/v1/singlestat_panel2.png) -The singlestat Panel allows you to show the one main summary stat of a single series (like max, min, avg, sum). It also provides thresholds to color the stat or the Panel background. +The Singlestat Panel allows you to show the one main summary stat of a SINGLE series. It reduces the series into a single number (by looking at the max, min, average, or sum of values in the series). Singlestat also provides thresholds to color the stat or the Panel background. It can also translate the single number into a text value, and show a sparkline summary of the series. ### Singlestat Panel Configuration @@ -17,11 +17,11 @@ The singlestat panel has a normal query editor to allow you define your exact me 1. `Big Value`: Big Value refers to how we display the main stat for the Singlestat Panel. This is always a single value that is displayed in the Panel in between two strings, `Prefix` and `Suffix`. The single number is calculated by choosing a function (min,max,average,current,total) of your metric query. This functions reduces your query into a single numeric value. -2. `Font Size`: You can use this section -3. `Values`: The Value fields let you set the function (min, max, average, current, total) that your entire query is reduced into a single value with. You can also set the font size of theand font-size (as a %) of the metric query that the Panel is configured with. This reduces the entire query into a single summary value that is displayed. +2. `Font Size`: You can use this section to select the font size of the different texts in the Singlestat Panel, i.e. prefix, value and postfix. +3. `Values`: The Value fields let you set the function (min, max, average, current, total) that your entire query is reduced into a single value with. You can also set the font size of the Value field and font-size (as a %) of the metric query that the Panel is configured with. This reduces the entire query into a single summary value that is displayed. 4. `Postfixes`: The Postfix fields let you define a custom label and font-size (as a %) to appear *after* the value 5. `Units`: Units are appended to the the Singlestat within the panel, and will respect the color and threshold settings for the value. -6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitely. +6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly. ### Coloring @@ -29,15 +29,15 @@ The coloring options of the Singlestat Panel config allow you to dynamically cha -1. 
`Background`: This checkbox applies the configured thresholds and colors to the entirity of the Singlestat Panel background. +1. `Background`: This checkbox applies the configured thresholds and colors to the entirety of the Singlestat Panel background. 2. `Value`: This checkbox applies the configured thresholds and colors to the summary stat. -3. `Thresholds`: Change the background and value colors dyanmically within the panel, depending on the Singlestat value. The threshold field accepts **3 comma-separated** values, corresponding to the three colors directly to the right. +3. `Thresholds`: Change the background and value colors dynamically within the panel, depending on the Singlestat value. The threshold field accepts **3 comma-separated** values, corresponding to the three colors directly to the right. 4. `Colors`: Select a color and opacity 5. `Invert order`: This link toggles the threshold color order.
For example: Green, Orange, Red () will become Red, Orange, Green (). ### Spark Lines -Sparklines are a great way of seeing the historical data related to the summary stat, providing valuable context at a glance. Sparklines act differently than traditional graph panels and do not include x or y axis, coordinates, a legend, or ability to interact with the graph. +Sparklines are a great way of seeing the historical data related to the summary stat, providing valuable context at a glance. Sparklines act differently than traditional Graph Panels and do not include x or y axis, coordinates, a legend, or ability to interact with the graph. @@ -54,4 +54,21 @@ Value to text mapping allows you to translate the value of the summary stat into +## Troubleshooting +### Multiple Series Error + + + + +Grafana 2.5 introduced stricter checking for multiple-series on singlestat panels. In previous versions, the panel logic did not verify that only a single series was used, and instead, displayed the first series encountered. Depending on your data source, this could have lead to inconsistent data being shown and/or a general confusion about which metric was being displayed. + +To fix your singlestat panel: + +- Edit your panel by clicking the Panel Title and selecting *Edit*. + +- Do you have multiple queries in the metrics tab? + - Solution: Select a single query to visualize. You can toggle whether a query is visualized by clicking the eye icon on each line. If the error persists, continue to the next solution. + +- Do you have one query? + - Solution: This likely means your query is returning multiple series. You will want to reduce this down to a single series. This can be accomplished in many ways, depending on your data source. Some common practices include summing the series, averaging or any number of other functions. Consult the documentation for your data source for additional information. diff --git a/docs/sources/reference/templating.md b/docs/sources/reference/templating.md index e1a321001122f..e605af845ea88 100644 --- a/docs/sources/reference/templating.md +++ b/docs/sources/reference/templating.md @@ -1,53 +1,81 @@ ---- -page_title: Templated dashboards -page_description: Templated dashboards +page_title: Dashboard Templating +page_description: Dashboard Templating page_keywords: grafana, templating, variables, guide, documentation --- -# Templated Dashboards +# Dashboard Templating ![](/img/v2/templating_var_list.png) ## Overview -Templating allows your Dashboards to be more interactive and dynamic. You can create Template variables that can be used practically anywhere in Grafana: metric queries on individual panels, series names, and titles. +Dashboard Templating allows you to make your Dashboards more interactive and dynamic. -Quickly change Template variables to show different graphs and metrics for different server and applications. +They’re one of the most powerful and most used features of Grafana, and they’ve recently gotten even more attention in Grafana 2.0 and Grafana 2.1. + +You can create Dashboard Template variables that can be used practically anywhere in a Dashboard: data queries on individual Panels (within the Query Editor), the names in your legends, or titles in Panels and Rows. + +You can configure Dashboard Templating by clicking the dropdown cog on the top of the Dashboard while viewing it. -You can find and configure the Templating for a particular Dashboard by clicking the dropdown cog on the top of the Dashboard when viewing it. 
## Variable types -There are three different types of Template variables. They can all be used to create dynamic variables that you can use throughout the Dashboard. They differ slightly in how they create values. +There are three different types of Template variables: query, custom, and interval. + +They can all be used to create dynamic variables that you can use throughout the Dashboard, but they differ in how they get the data for their values. + ### Query -> The Query type is often Data Source specific. Please consult the appropriate documentation for your particular Data Source. + > Note: The Query type is Data Source specific. Please consult the appropriate documentation for your particular Data Source. + +Query is the most common type of Template variable. Use the `Query` template type to generate a dynamic list of variables, simply by allowing Grafana to explore your Data Source metric namespace when the Dashboard loads. + +For example a query like `prod.servers.*` will fill the variable with all possible values that exists in that wildcard position (in the case of the Graphite Data Source). + +You can even create nested variables that use other variables in their definition. For example `apps.$app.servers.*` uses the variable $app in its own query definition. + +You can utilize the special ** All ** value to allow the Dashboard user to query for every single Query variable returned. Grafana will automatically translate ** All ** into the appropriate format for your Data Source. + +#### Multi-select +As of Grafana 2.1, it is now possible to select a subset of Query Template variables (previously it was possible to select an individual value or 'All', not multiple values that were less than All). This is accomplished via the Multi-Select option. If enabled, the Dashboard user will be able to enable and disable individual variables. + +The Multi-Select functionality is taken a step further with the introduction of Multi-Select Tagging. This functionality allows you to group individual Template variables together under a Tag or Group name. + +For example, if you were using Templating to list all 20 of your applications, you could use Multi-Select Tagging to group your applications by function or region or criticality, etc. + + > Note: Multi-Select Tagging functionality is currently experimental but is part of Grafana 2.1. To enable this feature click the enable icon when editing Template options for a particular variable. + + + +Grafana gets the list of tags and the list of values in each tag by performing two queries on your metric namespace. + +The Tags query returns a list of Tags. -This is the most common type of Template variable. Using the Query type to generate a dynamic list of variables, simply by allowing Grafana to explore your Data Source metric namespace when the Dashboard loads. +The Tag values query returns the values for a given Tag. -For example a query like `prod.servers.*` will fill the variable with all possible values that exists in that wildcard position (in the case of the Graphite Data Source). +Note: a proof of concept shim that translates the metric query into a SQL call is provided. This allows you to maintain your tag:value mapping independently of your Data Source. -You can even create nested variables that use other variables in their definition. For example `apps.$app.servers.*` uses the variable `$app` in its own query definition. 
+Once configured, Multi-Select Tagging provides a convenient way to group and your template variables, and slice your data in the exact way you want. The Tags can be seen on the right side of the template pull-down. -You can utilize the special "All" value to allow the Dashboard user to query for every single Query variable returned. Grafana will automatically translate All into the appropriate format for your Data Source. +![](/img/v2/multi-select.gif) -As of Grafana 2.1, it is now possible to select a subset of Query Template variables (previously it was possible to select an individual value or 'All', not multiple values that were less than All). This is accomplished via the Multi-Select option. If enabled, the Dashboard user will be able to enable and disable individual variables. ### Interval -Use the Interval type to create Template variables aroundr time ranges (eg. `1m`,`1h`, `1d`). There is also a special `auto` option that will change depending on the current time range, you can specify how many times the current time range should be divided to calculate the current `auto` range. +Use the `Interval` type to create Template variables around time ranges (eg. `1m`,`1h`, `1d`). There is also a special `auto` option that will change depending on the current time range, you can specify how many times the current time range should be divided to calculate the current `auto` range. ![](/img/v2/templated_variable_parameter.png) ### Custom -Use the Custom type to manually create Template variables around explicit values that are hard coded in the Dashboard and not dependent on any Data Source. You can specify multiple Custom Template values by separating them with a comma. +Use the `Custom` type to manually create Template variables around explicit values that are hard-coded into the Dashboard, and not dependent on any Data Source. You can specify multiple Custom Template values by separating them with a comma. ## Utilizing Template Variables with Repeating Panels and Repeating Rows Template Variables can be very useful to dynamically change what you're visualizing on a given panel. Sometimes, you might want to create entire new Panels (or Rows) based on what Template Variables have been selected. This is now possible in Grafana 2.1. -Once you've got your Template variables (of any type) configured the way you'd like, check out the Repeating Panels and Repeating Row documentatione +Once you've got your Template variables (of any type) configured the way you'd like, check out the Repeating Panels and Repeating Row documentation ## Screencast - Templated Graphite Queries diff --git a/docs/sources/reference/timerange.md b/docs/sources/reference/timerange.md index 2806c87623d15..2ab428622bcc1 100644 --- a/docs/sources/reference/timerange.md +++ b/docs/sources/reference/timerange.md @@ -4,30 +4,65 @@ page_description: Time range user guide page_keywords: grafana, time range, guide, documentation --- -# Dashboard time picker +# Time Range Controls Grafana provides numerous ways to manage the time ranges of the data being visualized, both at the Dashboard-level and the Panel-level. -![](/img/v1/time_range_controls.png) + -In the top right, you have the master Dashboard time picker (it's inbetween the 'Zoom out' and the 'Refresh' links). +In the top right, you have the master Dashboard time picker (it's in between the 'Zoom out' and the 'Refresh' links). -From this dropdown you can: +1. `Current time range & refresh interval`: This shows the current dashboard time and refresh interval. 
It also acts as the menu button to toggle the time range controls. +2. `Quick ranges`: Quick ranges are preset values to choose a relative time. At this time, quick ranges are not configurable, and will appear on every dashboard. +3. `Time range`: The time range section allows you to mix both explicit and relative ranges. The explicit time range format is `YYYY-MM-DD HH:MM:SS` +4. `Refreshing every:` When enabled, auto-refresh will reload the dashboard at the specified time range. Auto-refresh is most commonly used with relative time ranges ending in `now`, so new data will appear when the dashboard refreshes. -1. Specify an exact time range (eg. "October 13 12:01 to October 14 12:05) -2. Choose a relative time (eg. "Last 15 minutes","Last 1 week") -3. Configure auto-refresh options +These settings apply to all Panels in the Dashboard (except those with Panel Time Overrides enabled) -All of this applies to all Panels in the Dashboard (except those with Panel Time Overrides enabled) +## Time Units -## Customize relative time and auto auto-refresh options +The following time units are supported: `s (seconds)`, `m (minutes)`, `h (hours)`, `d (days)`, `w (weeks)`, `M (months)`, `y (years)`. The minus operator allows you to step back in time, relative to now. If you wish to display the full period of the unit (day, week, month, etc...), append `/$unit` to the end. + +Take a look at some examples to seen these concepts in practice: + +Example Relative Range | From: | To: +-------------- | ----- | --- +Last 5 minutes | `now-5m` | `now` +The day so far | `now/d` | `now` +This week | `now/w` | `now/w` +Week to date | `now/w` | `now` +Previous Month | `now-1M/M` | `now-1M/M` + + +## Dashboard-Level Time Picker Settings + +There are two settings available from the Dashboard Settings area, allowing customization of the auto-refresh intervals and the definition of `now`. + + + +### Auto-Refresh Options It's possible to customize the options displayed for relative time and the auto-refresh options. -From Dashboard settings, click the Timepicker tab. From here you can specify the relative and auto refresh intervals. The Timepicker tab settings are saved on a per Dashboard basis. Entries are comma separated and accept a number followed by one of the following units: s (seconds), m (minutes), h (hours), d (days), w (weeks), M (months), y (years). +From Dashboard settings, click the Timepicker tab. From here you can specify the relative and auto-refresh intervals. The Timepicker tab settings are saved on a per Dashboard basis. Entries are comma separated and accept any valid time unit. + +### Defining Now + +Users often ask, [when will then be now](https://www.youtube.com/watch?v=VeZ9HhHU86o)? Grafana offers the ability to override the `now` value on a per dashboard basis. Most commonly, this feature is used to accommodate known delays in data aggregation to avoid null values. + +## Panel time overrides & timeshift + +You can override the relative time range for individual panels, causing them to be different than what is selected in the Dashboard time picker in the upper right. This allows you to show metrics from different time periods or days at the same time. + + + +You control these overrides in panel editor mode and the tab `Time Range`. + + -![](/img/v1/timepicker_editor.png) +When you zoom or change the Dashboard time to a custom absolute time range, all panel overrides will be disabled. The panel relative time override is only active when the dashboard time is also relative. 
The panel timeshift override is always active, even when the dashboard time is absolute. -## Panel time override +The `Hide time override info` option allows you to hide the override info text that is by default shown in the +upper right of a panel when overridden time range options. -In Grafana 2.0, it's now possible for individual Panels to override the Dashboard time picker. Please check out the [whats new in 2.0 guide](../../guides/whats-new-in-v2/) for further information +Note: You can only override the dashboard time with relative time ranges. Absolute time ranges are not available. diff --git a/docs/sources/tutorials/hubot_howto.md b/docs/sources/tutorials/hubot_howto.md new file mode 100644 index 0000000000000..85ebb3e7c8557 --- /dev/null +++ b/docs/sources/tutorials/hubot_howto.md @@ -0,0 +1,126 @@ +--- +page_title: How To integrate Hubot and Grafana +page_description: Hubot Grafana install guide +page_keywords: grafana, tutorials, hubot, slack, hipchat, setup, install, config +author: Torkel Ödegaard +--- + +# How to integrate Hubot with Grafana + +Grafana 2.0 shipped with a great feature that enables it to render any graph or panel to a PNG image. +No matter what data source you are using, the PNG image of the Graph will look the same +as it does in your browser. + +This guide will show you how to install and configure the [Hubot-Grafana](https://github.com/stephenyeargin/hubot-grafana) +plugin. This plugin allows you to tell hubot to render any dashboard or graph right from a channel in +Slack, Hipchat or Basecamp. The bot will respond with an image of the graph and a link that will +take you to the graph. + +> *Amazon S3 Required*: The hubot-grafana script will upload the rendered graphs to Amazon S3. This +> is so Hipchat and Slack can show them reliably (they require the image to be publicly available). + +
+ +
+ +## What is Hubot? +[Hubot](https://hubot.github.com/) is an universal and extensible chat bot that can be used with many chat +services and has a huge library of third party plugins that allow you to automate anything from your +chat rooms. + +## Install Hubot +Hubot is very easy to install and host. If you do not already have a bot up and running please +read the official [Getting Started With Hubot](https://hubot.github.com/docs/) guide. + +## Install Hubot-Grafana script + +In your Hubot project repo install the Grafana plugin using `npm`: + + npm install hubot-grafana --save + +Edit the file external-scripts.json, and add hubot-grafana to the list of plugins. + +```json +[ +"hubot-pugme", +"hubot-shipit", +"hubot-grafana" +] +``` + +## Configure + +The `hubot-grafana` plugin requires a number of environment variables to be set in order to work properly. + + export HUBOT_GRAFANA_HOST=http://play.grafana.org + export HUBOT_GRAFANA_API_KEY=abcd01234deadbeef01234 + export HUBOT_GRAFANA_S3_BUCKET=mybucket + export HUBOT_GRAFANA_S3_ACCESS_KEY_ID=ABCDEF123456XYZ + export HUBOT_GRAFANA_S3_SECRET_ACCESS_KEY=aBcD01234dEaDbEef01234 + export HUBOT_GRAFANA_S3_PREFIX=graphs + export HUBOT_GRAFANA_S3_REGION=us-standard + +### Grafana server side rendering +The hubot plugin will take advantage of the Grafana server side rendering feature that can +render any panel on the server using phantomjs. Grafana ships with a phantomjs binary (linux only). + +To verify that this feature works try the `Direct link to rendered image` link in the panel share dialog. +If you do not get an image when opening this link verify that the required font packages are installed for phantomjs to work. + +### Grafana API Key + +You need to set the environment variable `HUBOT_GRAFANA_API_KEY` to a Grafana API Key. +You can add these from the API Keys page which you find in the Organization dropdown. + +### Amazon S3 +The `S3` options are optional but for the images to work properly in services like Slack and Hipchat they need +to publicly available. By specifying the `S3` options the hubot-grafana script will publish the rendered +panel to `S3` and it will use that URL when it posts to Slack or Hipchat. + +## Hubot commands + +- `hubot graf list` + - Lists the available dashboards +- `hubot graf db graphite-carbon-metrics` + - Graph all panels in the dashboard +- `hubot graf db graphite-carbon-metrics:3` + - Graph only panel with id 3 of a particular dashboard +- `hubot graf db graphite-carbon-metrics:cpu` + - Graph only the panels containing "cpu" (case insensitive) in the title +- `hubot graf db graphite-carbon-metrics now-12hr` + - Get a dashboard with a window of 12 hours ago to now +- `hubot graf db graphite-carbon-metrics now-24hr now-12hr` + - Get a dashboard with a window of 24 hours ago to 12 hours ago +- `hubot graf db graphite-carbon-metrics:3 now-8d now-1d` + - Get only the third panel of a particular dashboard with a window of 8 days ago to yesterday +- `hubot graf db graphite-carbon-metrics host=carbon-a` + - Get a templated dashboard with the `$host` parameter set to `carbon-a` + +## Aliases +Some of the hubot commands above can lengthy and you might have to remember the dashboard slug (url id). +If you have a few favorite graphs you want to be able check up on often (let's say from your mobile) you +can create hubot command aliases with the hubot script `hubot-alias`. + +Install it: + + npm i --save hubot-alias + +Now add `hubot-alias` to the list of plugins in `external-scripts.json` and restart hubot. 
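Building on the plugin list from earlier in this guide, `external-scripts.json` would then look something like this:

```json
[
"hubot-pugme",
"hubot-shipit",
"hubot-grafana",
"hubot-alias"
]
```
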
+ +Now you can add an alias like this: + +- `hubot alias graf-lb=graf db loadbalancers:2 now-20m` + +
+ Using the alias:
+ +
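+With the alias registered you then (assuming the usual `hubot-alias` expansion behavior) only have to type:
+
+- `hubot graf-lb`
+
+and the bot expands it to the full `graf db loadbalancers:2 now-20m` command before handing it to hubot-grafana.
+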
+ +## Summary + +Grafana is going to ship with integrated Slack and Hipchat features some day, but you do +not have to wait for that. Grafana 2 shipped with a very clever server side rendering feature +that can render any panel to a PNG using phantomjs. The hubot plugin for Grafana is something +you can install and use today! + + diff --git a/docs/sources/tutorials/index.md b/docs/sources/tutorials/index.md new file mode 100644 index 0000000000000..fd064ba5b152e --- /dev/null +++ b/docs/sources/tutorials/index.md @@ -0,0 +1,20 @@ +--- +page_title: Grafana Tutorials +page_description: Tutorials +page_keywords: grafana, tutorials +--- + +# Tutorials + +This section of the docs contains a series of tutorials and stack setup guides. + +## Articles + +- [How to integrate Hubot with Grafana](hubot_howto.md) + +## External links + +- [Installing Graphite and Grafana on RHEL 6, 7, or Ubuntu in under 30 minutes](http://blog.pkiwi.com/installing-graphite-and-grafana-on-rhel-6-7-or-ubuntu-in-under-30-minutes/) +- [Monitoring Urbancode deployments with Docker, Graphite, Grafana, collectd and chef!](http://cloud.boriskuschel.com/2015/08/monitoring-urbancode-deploments-with.html) +- [Scripting Grafana dashboards](http://anatolijd.blogspot.se/2014/07/scripting-grafana-dashboards.html) + diff --git a/docs/sources/tutorials/stack_guide_graphite.md b/docs/sources/tutorials/stack_guide_graphite.md new file mode 100644 index 0000000000000..1145cab224918 --- /dev/null +++ b/docs/sources/tutorials/stack_guide_graphite.md @@ -0,0 +1,263 @@ +--- +page_title: Graphite + Grafana + StatsD - Stack Setup Guide +page_description: Installation and configuration guide & how to for Grafana, Graphite & StatsD +page_keywords: grafana, tutorials, graphite, statsd, setup, configuration, howto, installation +author: Torkel Ödegaard +--- + +# Stack Setup & Config Guide: Graphite + Grafana + StatsD + +This lengthy article will guide you through installation, configuration and getting started with the amazing metric +stack that is composed of Graphite, Grafana and StatsD. + +Graphite is still king when it comes to time series databases due to its simple data model, ingestion +with integrated aggregation & rollups, amazing query features and speed. No other time series +database has yet matched Graphite's query flexibility and analytics potential. + +Graphite has a reputation for being tricky to install and scale. This guide aims to show +that this is not really the case, or, at least, that it is a lot better than you expect. + +> This guide does not only aim to be an install guide, but also to teach you +> the mechanics of metric collection, aggregation and querying. How Graphite +> stores and aggregates data is very important to understand in order to not +> get misled by graphs. + +## Installation - Ubuntu + +To begin with, we are going to install the 3 main components that define our metric stack. Later in the guide we will +install StatsD, but that is optional. + +- Carbon is the Graphite ingestion daemon responsible for +receiving metrics and storing them. +- Graphite-api is a lightweight version of graphite-web with only the HTTP api, and is +responsible for executing metric queries. +- Grafana is the frontend that visualizes metrics and the tool that helps you build metric +queries that will make the most out of your collected metrics. + +### Carbon + +Graphite & Carbon are written in Python, so we will start by installing Python packages. 
+ +``` +apt-get install \ + git \ + build-essential \ + libffi-dev libcairo2-dev \ + python-django \ + python-django-tagging \ + python-simplejson \ + python-memcache \ + python-ldap \ + python-cairo \ + python-twisted \ + python-pysqlite2 \ + python-support \ + python-dev \ + python-pip +``` + +Next we will clone carbon and whisper and install these components. Whisper is just a lib used +by carbon to write metrics to disk. + + cd /usr/local/src + git clone https://github.com/graphite-project/carbon.git + git clone https://github.com/graphite-project/whisper.git + + cd whisper && python setup.py install && cd .. + cd carbon && python setup.py install && cd .. + +### Configure carbon.conf + +Copy the example carbon config: +``` +cp /opt/graphite/conf/carbon.conf.example /opt/graphite/conf/carbon.conf +``` + +Edit the config file `/opt/graphite/conf/carbon.conf`, find the line `ENABLE_UDP_LISTENER` and +change this setting to `True`. + +### Configure storage-schemas.conf + +Create a new file at `/opt/graphite/conf/storage-schemas.conf` with the following content: + +``` +[carbon] +pattern = ^carbon\..* +retentions = 1m:30d,10m:1y,1h:5y + +[default] +pattern = .* +retentions = 10s:1d,1m:7d,10m:1y +``` + +This config specifies the resolution of metrics and the retention periods. For example, all metrics beginning with the word `carbon` are received every minute and stored for 30 days, then +rolled up into 10 minute buckets and stored for 1 year, then rolled up into 1 hour buckets and stored for 5 years. For all other metrics +the default rule will be applied, with other retention periods. + +This configuration is very important, as the first retention period must match the rate at which you send metrics. The default rule has 10 seconds +as its first resolution, so when configuring StatsD we should configure it to send metrics every 10 seconds. + +> If you send values more frequently than the highest resolution, for example if you send data every second but +> the storage schema rules define the highest resolution to be 10 seconds, then the values you send will just +> overwrite each other and the last value sent during every 10 second period will be saved. StatsD can work around this +> problem. + +### Configure storage-aggregation.conf + +Copy the default config and open it in an editor. +``` +cp /opt/graphite/conf/storage-aggregation.conf.example /opt/graphite/conf/storage-aggregation.conf +``` + +Example config: +``` +[min] +pattern = \.min$ +xFilesFactor = 0.1 +aggregationMethod = min + +[max] +pattern = \.max$ +xFilesFactor = 0.1 +aggregationMethod = max + +[sum] +pattern = \.count$ +xFilesFactor = 0 +aggregationMethod = sum + +[default_average] +pattern = .* +xFilesFactor = 0.5 +aggregationMethod = average +``` + +You do not really need to change the default config, but it is very important to +understand what this config controls and what implications it has. Graphite +does rollups as part of the metric ingestion according to the rules defined in +`storage-schemas.conf`. For example, given the storage schema rule `10s:1d,1m:7d`, +when aggregating 6 values (each representing 10 seconds) into a 1 min bucket Graphite +will use an `aggregationMethod` such as `average`. What method to use +will be determined by the rules specified in `storage-aggregation.conf`. + +The default rules all look at the metric path ending. 
If a metric ends with `.count` then `sum` is used when doing rollups, if it ends with `.max` then the `max` function is used, and if it does not +end with max, min or count then `average` is used. This means that naming metrics is very important! But don't worry, if you use StatsD it will send the correct names to Graphite. + +### Start carbon +Let's install supervisord and let it start carbon. + +`apt-get install supervisor` + +Create a new file in `/etc/supervisor/conf.d/carbon.conf` with the following: + +``` +[program:carbon-cache] +command = /opt/graphite/bin/carbon-cache.py --debug start +stdout_logfile = /var/log/supervisor/%(program_name)s.log +stderr_logfile = /var/log/supervisor/%(program_name)s.log +autorestart = true +stopsignal = QUIT +``` + +``` +supervisorctl reload +``` + +### Graphite-api + +Graphite-api is a lightweight version of graphite-web with only the api component (no web ui). It is dead simple +to install. + +``` +pip install gunicorn graphite-api +``` + +After the configuration below you will have a graphite-api daemon running with an open HTTP api port of 8888. + +### Configuring Graphite-api + +Create a file `/etc/graphite-api.yaml` with an editor and set its content to: + +``` +search_index: /opt/graphite/storage/index +finders: + - graphite_api.finders.whisper.WhisperFinder +functions: + - graphite_api.functions.SeriesFunctions + - graphite_api.functions.PieFunctions +whisper: + directories: + - /opt/graphite/storage/whisper +time_zone: UTC +``` + +Let's create a supervisor file for graphite-api at `/etc/supervisor/conf.d/graphite-api.conf` + +``` +[program:graphite-api] +command = gunicorn -b 0.0.0.0:8888 -w 2 --log-level info graphite_api.app:app +stdout_logfile = /var/log/supervisor/%(program_name)s.log +stderr_logfile = /var/log/supervisor/%(program_name)s.log +autorestart = true +stopsignal = QUIT +``` + +Reload supervisor + + supervisorctl reload + +A carbon-cache daemon and graphite-api should now be running. Type `supervisorctl status` to verify that they are running. You can +also open `http://your_server_ip:8888/metrics/find?query=*` in your browser. You should see a JSON snippet. + + +### Install Grafana + + cd /tmp/ + wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.1.1_amd64.deb + sudo dpkg -i grafana_2.1.1_amd64.deb + sudo service grafana-server start + +Grafana should now be running with its default config on port 3000. + +## Grafana - first steps + +### Add data source + +Open http://your_server_ip:3000 in your browser and log in with the default user and password (`admin/admin`). + +- Click on `Data Sources` on the side menu. +- Click on `Add new` in the top menu +- Specify name `graphite` and check the `Default` checkbox +- Specify Url `http://localhost:8888` and Access `proxy` +- Click the `Add` button + +### Your first dashboard + +- Click on `Dashboards` +- Click on the `Home` button in the top menu; this should open the dashboard search dropdown +- Click on the `New` button at the bottom of this dropdown + +### Add a graph + +- Click on the green icon to the left to open the row menu +- Select `Add Panel` > `Graph` from the row menu +- An empty graph panel should appear with the title `no title (click here)`. Click on this title and then `Edit` +- This will open the graph in edit mode and take you to the metrics tab. +- There is one query already added (assigned letter A) but it is empty. +- Click on `select metric` to pick the first graphite metric node. A new `select metric` link will appear until you reach a leaf node. 
+- Try picking the metric path for `carbon.agents..cpuUsage`; you should now see a line appear in the graph! + +## Writing metrics to Graphite +Graphite has the simplest metric write protocol imaginable, something that has surely contributed to its wide adoption by metric +frameworks and numerous integrations. + + prod.server1.requests.count 10 1398969187 + + + +There are hundreds of tools and instrumentation frameworks that can send metrics using this protocol. + +### Installing StatsD +StatsD is a metrics aggregation daemon that makes it easy for apps on many machines to send measurements like timings and counters and have them aggregated, or have percentiles calculated. + +### Sending metrics to StatsD diff --git a/docs/sources/versions.html_fragment b/docs/sources/versions.html_fragment index 55190af8b37bf..3733641ac7972 100644 --- a/docs/sources/versions.html_fragment +++ b/docs/sources/versions.html_fragment @@ -1,3 +1,4 @@ +
  • Version v2.5
  • Version v2.1
  • Version v2.0
  • Version v1.9
  • diff --git a/emails/README.md b/emails/README.md new file mode 100644 index 0000000000000..9702abce7e62c --- /dev/null +++ b/emails/README.md @@ -0,0 +1,9 @@ + +- npm install +- gem install premailer +- grunt (default task will build new inlines email templates) +- grunt watch (will build on source html or css change) + +assembled email templates will be in dist/ and final +inlined templates will be in ../public/emails/ + diff --git a/emails/assets/css/ink.css b/emails/assets/css/ink.css new file mode 100644 index 0000000000000..e65aed1919479 --- /dev/null +++ b/emails/assets/css/ink.css @@ -0,0 +1,688 @@ +/********************************************** +* Ink v1.0.5 - Copyright 2013 ZURB Inc * +**********************************************/ + +/* Client-specific Styles & Reset */ + +#outlook a { + padding:0; +} + +body{ + width:100% !important; + min-width: 100%; + -webkit-text-size-adjust:100%; + -ms-text-size-adjust:100%; + margin:0; + padding:0; +} + +.ExternalClass { + width:100%; +} + +.ExternalClass, +.ExternalClass p, +.ExternalClass span, +.ExternalClass font, +.ExternalClass td, +.ExternalClass div { + line-height: 100%; +} + +#backgroundTable { + margin:0; + padding:0; + width:100% !important; + line-height: 100% !important; +} + +img { + outline:none; + text-decoration:none; + -ms-interpolation-mode: bicubic; + width: auto; + max-width: 100%; + float: left; + clear: both; + display: block; +} + +center { + width: 100%; + min-width: 580px; +} + +a img { + border: none; +} + +p { + margin: 0 0 0 10px; +} + +table { + border-spacing: 0; + border-collapse: collapse; +} + +td { + word-break: break-word; + -webkit-hyphens: auto; + -moz-hyphens: auto; + hyphens: auto; + border-collapse: collapse !important; +} + +table, tr, td { + padding: 0; + vertical-align: top; + text-align: left; +} + +hr { + color: #d9d9d9; + background-color: #d9d9d9; + height: 1px; + border: none; +} + +/* Responsive Grid */ + +table.body { + height: 100%; + width: 100%; +} + +table.container { + width: 580px; + margin: 0 auto; + text-align: inherit; +} + +table.row { + padding: 0px; + width: 100%; + position: relative; +} + +table.container table.row { + display: block; +} + +td.wrapper { + padding: 10px 20px 0px 0px; + position: relative; +} + +table.columns, +table.column { + margin: 0 auto; +} + +table.columns td, +table.column td { + padding: 0px 0px 10px; +} + +table.columns td.sub-columns, +table.column td.sub-columns, +table.columns td.sub-column, +table.column td.sub-column { + padding-right: 10px; +} + +td.sub-column, td.sub-columns { + min-width: 0px; +} + +table.row td.last, +table.container td.last { + padding-right: 0px; +} + +table.one { width: 30px; } +table.two { width: 80px; } +table.three { width: 130px; } +table.four { width: 180px; } +table.five { width: 230px; } +table.six { width: 280px; } +table.seven { width: 330px; } +table.eight { width: 380px; } +table.nine { width: 430px; } +table.ten { width: 480px; } +table.eleven { width: 530px; } +table.twelve { width: 580px; } + +table.one center { min-width: 30px; } +table.two center { min-width: 80px; } +table.three center { min-width: 130px; } +table.four center { min-width: 180px; } +table.five center { min-width: 230px; } +table.six center { min-width: 280px; } +table.seven center { min-width: 330px; } +table.eight center { min-width: 380px; } +table.nine center { min-width: 430px; } +table.ten center { min-width: 480px; } +table.eleven center { min-width: 530px; } +table.twelve center { min-width: 580px; } + +table.one .panel center 
{ min-width: 10px; } +table.two .panel center { min-width: 60px; } +table.three .panel center { min-width: 110px; } +table.four .panel center { min-width: 160px; } +table.five .panel center { min-width: 210px; } +table.six .panel center { min-width: 260px; } +table.seven .panel center { min-width: 310px; } +table.eight .panel center { min-width: 360px; } +table.nine .panel center { min-width: 410px; } +table.ten .panel center { min-width: 460px; } +table.eleven .panel center { min-width: 510px; } +table.twelve .panel center { min-width: 560px; } + +.body .columns td.one, +.body .column td.one { width: 8.333333%; } +.body .columns td.two, +.body .column td.two { width: 16.666666%; } +.body .columns td.three, +.body .column td.three { width: 25%; } +.body .columns td.four, +.body .column td.four { width: 33.333333%; } +.body .columns td.five, +.body .column td.five { width: 41.666666%; } +.body .columns td.six, +.body .column td.six { width: 50%; } +.body .columns td.seven, +.body .column td.seven { width: 58.333333%; } +.body .columns td.eight, +.body .column td.eight { width: 66.666666%; } +.body .columns td.nine, +.body .column td.nine { width: 75%; } +.body .columns td.ten, +.body .column td.ten { width: 83.333333%; } +.body .columns td.eleven, +.body .column td.eleven { width: 91.666666%; } +.body .columns td.twelve, +.body .column td.twelve { width: 100%; } + +td.offset-by-one { padding-left: 50px; } +td.offset-by-two { padding-left: 100px; } +td.offset-by-three { padding-left: 150px; } +td.offset-by-four { padding-left: 200px; } +td.offset-by-five { padding-left: 250px; } +td.offset-by-six { padding-left: 300px; } +td.offset-by-seven { padding-left: 350px; } +td.offset-by-eight { padding-left: 400px; } +td.offset-by-nine { padding-left: 450px; } +td.offset-by-ten { padding-left: 500px; } +td.offset-by-eleven { padding-left: 550px; } + +td.expander { + visibility: hidden; + width: 0px; + padding: 0 !important; +} + +table.columns .text-pad, +table.column .text-pad { + padding-left: 10px; + padding-right: 10px; +} + +table.columns .left-text-pad, +table.columns .text-pad-left, +table.column .left-text-pad, +table.column .text-pad-left { + padding-left: 10px; +} + +table.columns .right-text-pad, +table.columns .text-pad-right, +table.column .right-text-pad, +table.column .text-pad-right { + padding-right: 10px; +} + +/* Block Grid */ + +.block-grid { + width: 100%; + max-width: 580px; +} + +.block-grid td { + display: inline-block; + padding:10px; +} + +.two-up td { + width:270px; +} + +.three-up td { + width:173px; +} + +.four-up td { + width:125px; +} + +.five-up td { + width:96px; +} + +.six-up td { + width:76px; +} + +.seven-up td { + width:62px; +} + +.eight-up td { + width:52px; +} + +/* Alignment & Visibility Classes */ + +table.center, td.center { + text-align: center; +} + +h1.center, +h2.center, +h3.center, +h4.center, +h5.center, +h6.center { + text-align: center; +} + +span.center { + display: block; + width: 100%; + text-align: center; +} + +img.center { + margin: 0 auto; + float: none; +} + +.show-for-small, +.hide-for-desktop { + display: none; +} + +/* Typography */ + +body, table.body, h1, h2, h3, h4, h5, h6, p, td { + color: #222222; + font-family: "Helvetica", "Arial", sans-serif; + font-weight: normal; + padding:0; + margin: 0; + text-align: left; + line-height: 1.3; +} + +h1, h2, h3, h4, h5, h6 { + word-break: normal; +} + +h1 {font-size: 40px;} +h2 {font-size: 36px;} +h3 {font-size: 32px;} +h4 {font-size: 28px;} +h5 {font-size: 24px;} +h6 {font-size: 20px;} +body, 
table.body, p, td {font-size: 14px;line-height:19px;} + +p.lead, p.lede, p.leed { + font-size: 18px; + line-height:21px; +} + +p { + margin-bottom: 10px; +} + +small { + font-size: 10px; +} + +a { + color: #2ba6cb; + text-decoration: none; +} + +a:hover { + color: #2795b6 !important; +} + +a:active { + color: #2795b6 !important; +} + +a:visited { + color: #2ba6cb !important; +} + +h1 a, +h2 a, +h3 a, +h4 a, +h5 a, +h6 a { + color: #2ba6cb; +} + +h1 a:active, +h2 a:active, +h3 a:active, +h4 a:active, +h5 a:active, +h6 a:active { + color: #2ba6cb !important; +} + +h1 a:visited, +h2 a:visited, +h3 a:visited, +h4 a:visited, +h5 a:visited, +h6 a:visited { + color: #2ba6cb !important; +} + +/* Panels */ + +.panel { + background: #f2f2f2; + border: 1px solid #d9d9d9; + padding: 10px !important; +} + +.sub-grid table { + width: 100%; +} + +.sub-grid td.sub-columns { + padding-bottom: 0; +} + +/* Buttons */ + +table.button, +table.tiny-button, +table.small-button, +table.medium-button, +table.large-button { + width: 100%; + overflow: hidden; +} + +table.button td, +table.tiny-button td, +table.small-button td, +table.medium-button td, +table.large-button td { + display: block; + width: auto !important; + text-align: center; + background: #2ba6cb; + border: 1px solid #2284a1; + color: #ffffff; + padding: 8px 0; +} + +table.tiny-button td { + padding: 5px 0 4px; +} + +table.small-button td { + padding: 8px 0 7px; +} + +table.medium-button td { + padding: 12px 0 10px; +} + +table.large-button td { + padding: 21px 0 18px; +} + +table.button td a, +table.tiny-button td a, +table.small-button td a, +table.medium-button td a, +table.large-button td a { + font-weight: bold; + text-decoration: none; + font-family: Helvetica, Arial, sans-serif; + color: #ffffff; + font-size: 16px; +} + +table.tiny-button td a { + font-size: 12px; + font-weight: normal; +} + +table.small-button td a { + font-size: 16px; +} + +table.medium-button td a { + font-size: 20px; +} + +table.large-button td a { + font-size: 24px; +} + +table.button:hover td, +table.button:visited td, +table.button:active td { + background: #2795b6 !important; +} + +table.button:hover td a, +table.button:visited td a, +table.button:active td a { + color: #fff !important; +} + +table.button:hover td, +table.tiny-button:hover td, +table.small-button:hover td, +table.medium-button:hover td, +table.large-button:hover td { + background: #2795b6 !important; +} + +table.button:hover td a, +table.button:active td a, +table.button td a:visited, +table.tiny-button:hover td a, +table.tiny-button:active td a, +table.tiny-button td a:visited, +table.small-button:hover td a, +table.small-button:active td a, +table.small-button td a:visited, +table.medium-button:hover td a, +table.medium-button:active td a, +table.medium-button td a:visited, +table.large-button:hover td a, +table.large-button:active td a, +table.large-button td a:visited { + color: #ffffff !important; +} + +table.secondary td { + background: #e9e9e9; + border-color: #d0d0d0; + color: #555; +} + +table.secondary td a { + color: #555; +} + +table.secondary:hover td { + background: #d0d0d0 !important; + color: #555; +} + +table.secondary:hover td a, +table.secondary td a:visited, +table.secondary:active td a { + color: #555 !important; +} + +table.success td { + background: #5da423; + border-color: #457a1a; +} + +table.success:hover td { + background: #457a1a !important; +} + +table.alert td { + background: #c60f13; + border-color: #970b0e; +} + +table.alert:hover td { + background: #970b0e !important; 
+} + +table.radius td { + -webkit-border-radius: 3px; + -moz-border-radius: 3px; + border-radius: 3px; +} + +table.round td { + -webkit-border-radius: 500px; + -moz-border-radius: 500px; + border-radius: 500px; +} + +/* Outlook First */ + +body.outlook p { + display: inline !important; +} + +/* Media Queries */ + +@media only screen and (max-width: 600px) { + + table[class="body"] img { + width: auto !important; + height: auto !important; + } + + table[class="body"] center { + min-width: 0 !important; + } + + table[class="body"] .container { + width: 95% !important; + } + + table[class="body"] .row { + width: 100% !important; + display: block !important; + } + + table[class="body"] .wrapper { + display: block !important; + padding-right: 0 !important; + } + + table[class="body"] .columns, + table[class="body"] .column { + table-layout: fixed !important; + float: none !important; + width: 100% !important; + padding-right: 0px !important; + padding-left: 0px !important; + display: block !important; + } + + table[class="body"] .wrapper.first .columns, + table[class="body"] .wrapper.first .column { + display: table !important; + } + + table[class="body"] table.columns td, + table[class="body"] table.column td { + width: 100% !important; + } + + table[class="body"] .columns td.one, + table[class="body"] .column td.one { width: 8.333333% !important; } + table[class="body"] .columns td.two, + table[class="body"] .column td.two { width: 16.666666% !important; } + table[class="body"] .columns td.three, + table[class="body"] .column td.three { width: 25% !important; } + table[class="body"] .columns td.four, + table[class="body"] .column td.four { width: 33.333333% !important; } + table[class="body"] .columns td.five, + table[class="body"] .column td.five { width: 41.666666% !important; } + table[class="body"] .columns td.six, + table[class="body"] .column td.six { width: 50% !important; } + table[class="body"] .columns td.seven, + table[class="body"] .column td.seven { width: 58.333333% !important; } + table[class="body"] .columns td.eight, + table[class="body"] .column td.eight { width: 66.666666% !important; } + table[class="body"] .columns td.nine, + table[class="body"] .column td.nine { width: 75% !important; } + table[class="body"] .columns td.ten, + table[class="body"] .column td.ten { width: 83.333333% !important; } + table[class="body"] .columns td.eleven, + table[class="body"] .column td.eleven { width: 91.666666% !important; } + table[class="body"] .columns td.twelve, + table[class="body"] .column td.twelve { width: 100% !important; } + + table[class="body"] td.offset-by-one, + table[class="body"] td.offset-by-two, + table[class="body"] td.offset-by-three, + table[class="body"] td.offset-by-four, + table[class="body"] td.offset-by-five, + table[class="body"] td.offset-by-six, + table[class="body"] td.offset-by-seven, + table[class="body"] td.offset-by-eight, + table[class="body"] td.offset-by-nine, + table[class="body"] td.offset-by-ten, + table[class="body"] td.offset-by-eleven { + padding-left: 0 !important; + } + + table[class="body"] table.columns td.expander { + width: 1px !important; + } + + table[class="body"] .right-text-pad, + table[class="body"] .text-pad-right { + padding-left: 10px !important; + } + + table[class="body"] .left-text-pad, + table[class="body"] .text-pad-left { + padding-right: 10px !important; + } + + table[class="body"] .hide-for-small, + table[class="body"] .show-for-desktop { + display: none !important; + } + + table[class="body"] .show-for-small, + 
table[class="body"] .hide-for-desktop { + display: inherit !important; + } +} diff --git a/emails/assets/css/style.css b/emails/assets/css/style.css new file mode 100644 index 0000000000000..f138a0c12961f --- /dev/null +++ b/emails/assets/css/style.css @@ -0,0 +1,144 @@ + +body, table.body, h1, h2, h3, h4, h5, h6, p, td { + font-family: 'Open Sans', 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; + -webkit-font-smoothing: antialiased; + -webkit-text-size-adjust: none; +} + +h1 {font-size: 40px;} +h2 {font-size: 36px;} +h3 { + font-size: 22px; + margin-top: 20px; +} +h4 {font-size: 20px;} +h5 {font-size: 18px;} +h6 {font-size: 16px;} + +.emphasis { + font-weight: 600; +} + +a { + color: #E67612; + text-decoration: none; +} + +a:hover { + color: #ff8f2b !important; +} + +a:active { + color: #F2821E !important; +} + +a:visited { + color: #E67612 !important; +} + +table.facebook td { + background: #3b5998; + border-color: #2d4473; +} + +table.facebook:hover td { + background: #2d4473 !important; +} + +table.twitter td { + background: #00acee; + border-color: #0087bb; +} + +table.twitter:hover td { + background: #0087bb !important; +} + +table.google-plus td { + background-color: #DB4A39; + border-color: #CC0000; +} + +table.google-plus:hover td { + background: #CC0000 !important; +} + +.template-label { + color: #ffffff; + font-weight: bold; + font-size: 11px; +} + +.callout .wrapper { + padding-bottom: 20px; +} + +.callout .panel { + background: #ECF8FF; + border-color: #b9e5ff; +} + +.header { + background: #333; +} + +.footer { + margin-top: 20px; +} + +@media only screen and (max-width: 600px) { + table[class="body"] .right-text-pad { + padding-left: 10px !important; + } + + table[class="body"] .left-text-pad { + padding-right: 10px !important; + } +} + +table.better-button { + margin-top: 10px; + margin-bottom: 20px; +} + +table.columns td.better-button { + -webkit-border-radius: 2px; + -moz-border-radius: 2px; + border-radius: 2px; + padding-bottom: 0px; +} + +.better-button a { + text-decoration: none; + -webkit-border-radius: 2px; + -moz-border-radius: 2px; + border-radius: 2px; + + padding: 12px 25px; + border: 1px solid #ff8f2b; + display: inline-block; + color: #FFF; +} + +.better-button:hover a { + color: #FFFFFF !important; + background-color: #F2821E; + border: 1px solid #F2821E; +} + +.better-button:visited a { + color: #FFFFFF !important; +} + +.better-button:active a { + color: #FFFFFF !important; +} + +.verification-code { + background-color: #EEEEEE; + padding: 3px; + margin: 8px; + display: inline-block; + font-weight: bold; + font-size: 20px; +} diff --git a/emails/grunt/aliases.yaml b/emails/grunt/aliases.yaml new file mode 100644 index 0000000000000..6a2e47777cd5a --- /dev/null +++ b/emails/grunt/aliases.yaml @@ -0,0 +1,8 @@ + +default: + - 'clean' + - 'assemble' + - 'replace' + - 'uncss' + - 'processhtml' + - 'premailer' diff --git a/emails/grunt/assemble.js b/emails/grunt/assemble.js new file mode 100644 index 0000000000000..a4a6390c4c255 --- /dev/null +++ b/emails/grunt/assemble.js @@ -0,0 +1,16 @@ +module.exports = function() { + 'use strict'; + return { + options: { + layout: 'templates/layouts/default.html', + partials: ['templates/partials/*.hbs'], + helpers: ['templates/helpers/**/*.js'], + data: [], + flatten: true + }, + pages: { + src: ['templates/*.html'], + dest: 'dist/' + } + }; +}; diff --git a/tasks/options/useminPrepare.js b/emails/grunt/clean.js similarity index 56% rename from tasks/options/useminPrepare.js rename to emails/grunt/clean.js 
index f4b948a846d87..60b8ff0a91745 100644 --- a/tasks/options/useminPrepare.js +++ b/emails/grunt/clean.js @@ -1,7 +1,5 @@ module.exports = function(config) { return { - html: [ - 'tmp/index.html', - ] + dist: ['dist'], }; }; diff --git a/emails/grunt/premailer.js b/emails/grunt/premailer.js new file mode 100644 index 0000000000000..c50ca1af015ec --- /dev/null +++ b/emails/grunt/premailer.js @@ -0,0 +1,14 @@ +module.exports = { + main: { + options: { + verbose: true, + removeComments: true + }, + files: [{ + expand: true, // Enable dynamic expansion. + cwd: 'dist', // Src matches are relative to this path. + src: ['*.html'], // Actual pattern(s) to match. + dest: '../public/emails/', // Destination path prefix. + }], + } +}; diff --git a/emails/grunt/processhtml.js b/emails/grunt/processhtml.js new file mode 100644 index 0000000000000..efe34626a4968 --- /dev/null +++ b/emails/grunt/processhtml.js @@ -0,0 +1,10 @@ +module.exports = { + dist: { + files: [{ + expand: true, // Enable dynamic expansion. + cwd: 'dist', // Src matches are relative to this path. + src: ['*.html'], // Actual pattern(s) to match. + dest: 'dist/', // Destination path prefix. + }], + } +}; diff --git a/emails/grunt/replace.js b/emails/grunt/replace.js new file mode 100644 index 0000000000000..7038802d34e1b --- /dev/null +++ b/emails/grunt/replace.js @@ -0,0 +1,13 @@ +module.exports = { + dist: { + overwrite: true, + src: ['dist/*.html'], + replacements: [{ + from: '[[', + to: '{{' + }, { + from: ']]', + to: '}}' + }] + } +}; diff --git a/emails/grunt/uncss.js b/emails/grunt/uncss.js new file mode 100644 index 0000000000000..114af6942c57f --- /dev/null +++ b/emails/grunt/uncss.js @@ -0,0 +1,9 @@ +module.exports = { + dist: { + src: ['dist/*.html'], + dest: 'dist/css/tidy.css', + options: { + report: 'min' // optional: include to report savings + } + } +}; diff --git a/emails/grunt/watch.js b/emails/grunt/watch.js new file mode 100644 index 0000000000000..65b3970da7f25 --- /dev/null +++ b/emails/grunt/watch.js @@ -0,0 +1,16 @@ +module.exports = { + src: { + files: [ + //what are the files that we want to watch + 'assets/css/*.css', + 'templates/**/*.html', + 'grunt/*.js', + ], + tasks: ['default'], + options: { + nospawn: true, + livereload: false, + } + } + +}; diff --git a/emails/gruntfile.js b/emails/gruntfile.js new file mode 100644 index 0000000000000..b6d54cf651b16 --- /dev/null +++ b/emails/gruntfile.js @@ -0,0 +1,6 @@ +module.exports = function(grunt) { + + // load grunt config + require('load-grunt-config')(grunt); + +}; diff --git a/emails/package.json b/emails/package.json new file mode 100644 index 0000000000000..96779fb560973 --- /dev/null +++ b/emails/package.json @@ -0,0 +1,24 @@ +{ + "name": "Grafana-Email-Campaign", + "version": "1.0.0", + "description": "Grafana Email templates based on Zurb Ink", + "repository": "dnnsldr/", + "author": { + "name": "dnnsldr", + "email": "delder@riester.com", + "url": "https://github.com/dnnsldr" + }, + "devDependencies": { + "grunt": "^0.4.5", + "grunt-premailer": "^0.2.10", + "grunt-processhtml": "^0.3.3", + "grunt-uncss": "^0.3.7", + "load-grunt-config": "^0.14.0", + "grunt-contrib-watch": "^0.6.1", + "grunt-text-replace": "^0.3.12" + }, + "dependencies": { + "grunt-assemble": "^0.4.0", + "grunt-contrib-clean": "^0.6.0" + } +} diff --git a/emails/templates/invited_to_org.html b/emails/templates/invited_to_org.html new file mode 100644 index 0000000000000..59e76cd5a77f3 --- /dev/null +++ b/emails/templates/invited_to_org.html @@ -0,0 +1,47 @@ + + +[[Subject 
.Subject "[[.InvitedBy]] has added you to the [[.OrgName]] organization"]] + + + + + +
    + + + + + + +
    +

    You have been added to [[.OrgName]]

    +
    + +
    + + + + + +
    + + + + + + + + +
    +

    [[.InvitedBy]] has added you to the [[.OrgName]] organization in Grafana. +

    Once logged in, [[.OrgName]] will be available in the left side menu, in the dropdown below your username.

    +
    + + + + +
    Log in now
    +
    +
    + + diff --git a/emails/templates/layouts/default.html b/emails/templates/layouts/default.html new file mode 100644 index 0000000000000..0fd5b35ea4812 --- /dev/null +++ b/emails/templates/layouts/default.html @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + +
    +
    + + + + + +
    +
    + + + + + +
    + + + + + + +
    + +
    + +
    + +
    +
    + + + + + + +
    + {{> body }} + + + + + + + + +
    +
    +
    + + diff --git a/emails/templates/new_user_invite.html b/emails/templates/new_user_invite.html new file mode 100644 index 0000000000000..86948a665f6d2 --- /dev/null +++ b/emails/templates/new_user_invite.html @@ -0,0 +1,49 @@ + + +[[Subject .Subject "[[.InvitedBy]] has invited you to join Grafana"]] + + + + + +
    + + + + + + +
    +

    You're invited to join [[.OrgName]]

    +
    + +
    + + + + + +
    + + + + + + + + + + + +
    +

    You've been invited to join the [[.OrgName]] organization by [[.InvitedBy]]. To accept your invitation and join the team, please click the link below:

    +
    + + + + +
    Accept Invitation
    +
    +

    You can also copy/paste this link into your browser directly: [[.LinkUrl]]

    +
    +
    \ No newline at end of file diff --git a/emails/templates/reset_password.html b/emails/templates/reset_password.html new file mode 100644 index 0000000000000..f3aca5da95eb8 --- /dev/null +++ b/emails/templates/reset_password.html @@ -0,0 +1,42 @@ +[[Subject .Subject "Reset your Grafana password - [[.Name]]"]] + + + + + +
    + + + + + + +
    +

    Hi [[.Name]]

    +
    + +
    + + + + + +
    + + + + + +
    +

    + Please click the following link to reset your password within [[.EmailCodeValidHours]] hours. +

    +

    + [[.AppUrl]]user/password/reset?code=[[.Code]] +

    +

Not working? Try copying and pasting it into your browser.

    +
    + +
    + + diff --git a/emails/templates/signup_started.html b/emails/templates/signup_started.html new file mode 100644 index 0000000000000..39b369a1f7cd6 --- /dev/null +++ b/emails/templates/signup_started.html @@ -0,0 +1,46 @@ +[[Subject .Subject "Welcome to Grafana, please complete your sign up!"]] + + + + + +
    + + + + + + +
    +

    Complete the signup

    +
    + +
    + + + + + +
    + + + + + + + + +
+ Copy and paste the email verification code:
    + [[.Code]]
    in + the sign up form or use the link below. +
    + + + + +
    Complete Sign Up
    +
    +
    + + diff --git a/emails/templates/welcome_on_signup.html b/emails/templates/welcome_on_signup.html new file mode 100644 index 0000000000000..b93c56b77b1b7 --- /dev/null +++ b/emails/templates/welcome_on_signup.html @@ -0,0 +1,37 @@ +[[Subject .Subject "Welcome to Grafana"]] + + + + + +
    + + + + + + +
    +

    Hi [[.Name]]

    +
    + +
    + + + + + +
    + + + + + +
    +

+ If you are new to Grafana, please read the Getting Started guide. 

    +
    +
    + + diff --git a/public/test/karma.conf.js b/karma.conf.js similarity index 61% rename from public/test/karma.conf.js rename to karma.conf.js index d5d67d5218bee..f05b7dcc61a76 100644 --- a/public/test/karma.conf.js +++ b/karma.conf.js @@ -2,17 +2,16 @@ module.exports = function(config) { 'use strict'; config.set({ - basePath: '../../', + basePath: __dirname + '/public_gen', frameworks: ['mocha', 'requirejs', 'expect', 'sinon'], // list of files / patterns to load in the browser files: [ - 'public/test/test-main.js', - {pattern: 'public/app/**/*.js', included: false}, - {pattern: 'public/vendor/**/*.js', included: false}, - {pattern: 'public/test/**/*.js', included: false}, - {pattern: 'public/**/*.js', included: false} + 'test/test-main.js', + {pattern: 'app/**/*.js', included: false}, + {pattern: 'vendor/**/*.js', included: false}, + {pattern: 'test/**/*.js', included: false} ], // list of files to exclude @@ -26,6 +25,8 @@ module.exports = function(config) { browsers: ['PhantomJS'], captureTimeout: 60000, singleRun: true, + autoWatchBatchDelay: 1000, }); + }; diff --git a/latest.json b/latest.json index a85d79df53991..79eb42a8527f9 100644 --- a/latest.json +++ b/latest.json @@ -1,3 +1,3 @@ { - "version": "2.0.2" + "version": "2.1.1" } diff --git a/main.go b/main.go index b205487f38337..4a052ea493408 100644 --- a/main.go +++ b/main.go @@ -2,12 +2,14 @@ package main import ( "flag" + "fmt" "io/ioutil" "os" "os/signal" "path/filepath" "runtime" "strconv" + "syscall" "time" "github.com/grafana/grafana/pkg/cmd" @@ -26,31 +28,35 @@ import ( var version = "master" var commit = "NA" var buildstamp string +var build_date string var configFile = flag.String("config", "", "path to config file") var homePath = flag.String("homepath", "", "path to grafana install/home path, defaults to working directory") var pidFile = flag.String("pidfile", "", "path to pid file") +var exitChan = make(chan int) func init() { runtime.GOMAXPROCS(runtime.NumCPU()) } func main() { + + v := flag.Bool("v", false, "prints current version and exits") + flag.Parse() + if *v { + fmt.Printf("Version %s (commit: %s)\n", version, commit) + os.Exit(0) + } + buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64) setting.BuildVersion = version setting.BuildCommit = commit setting.BuildStamp = buildstampInt64 - go func() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - <-c - os.Exit(0) - }() + go listenToSystemSignels() flag.Parse() - writePIDFile() initRuntime() @@ -69,17 +75,20 @@ func main() { } cmd.StartServer() - - log.Close() + exitChan <- 0 } func initRuntime() { - setting.NewConfigContext(&setting.CommandLineArgs{ + err := setting.NewConfigContext(&setting.CommandLineArgs{ Config: *configFile, HomePath: *homePath, Args: flag.Args(), }) + if err != nil { + log.Fatal(3, err.Error()) + } + log.Info("Starting Grafana") log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0)) setting.LogConfigurationInfo() @@ -105,3 +114,27 @@ func writePIDFile() { log.Fatal(3, "Failed to write pidfile", err) } } + +func listenToSystemSignels() { + signalChan := make(chan os.Signal, 1) + code := 0 + + signal.Notify(signalChan, os.Interrupt) + signal.Notify(signalChan, os.Kill) + signal.Notify(signalChan, syscall.SIGTERM) + + select { + case sig := <-signalChan: + log.Info("Received signal %s. 
shutting down", sig) + case code = <-exitChan: + switch code { + case 0: + log.Info("Shutting down") + default: + log.Warn("Shutting down") + } + } + + log.Close() + os.Exit(code) +} diff --git a/package.json b/package.json index 4200da6951a69..75136e48f1a8d 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "company": "Coding Instinct AB" }, "name": "grafana", - "version": "2.1.0-pre1", + "version": "2.6.0-pre1", "repository": { "type": "git", "url": "http://github.com/torkelo/grafana.git" @@ -21,10 +21,10 @@ "grunt-contrib-connect": "~0.5.0", "grunt-contrib-copy": "~0.5.0", "grunt-contrib-cssmin": "~0.6.1", - "grunt-contrib-htmlmin": "~0.1.3", + "grunt-contrib-htmlmin": "~0.6.0", "grunt-contrib-jshint": "~0.10.0", "grunt-contrib-less": "~0.7.0", - "grunt-contrib-requirejs": "~0.4.1", + "grunt-contrib-requirejs": "~0.4.4", "grunt-contrib-uglify": "~0.8.0", "grunt-contrib-watch": "^0.6.1", "grunt-filerev": "^0.2.1", @@ -32,6 +32,8 @@ "grunt-karma": "~0.8.3", "grunt-ng-annotate": "^0.9.2", "grunt-string-replace": "~0.2.4", + "grunt-tslint": "^2.5.0", + "grunt-typescript": "^0.7.0", "grunt-usemin": "3.0.0", "jshint-stylish": "~0.1.5", "karma": "~0.12.31", @@ -47,21 +49,23 @@ "load-grunt-tasks": "0.2.0", "mocha": "2.2.4", "requirejs": "2.1.17", - "rjs-build-analysis": "0.0.3" + "rjs-build-analysis": "0.0.3", + "tslint": "^2.5.0-beta" }, "engines": { - "node": "0.10.x", - "npm": "1.2.x" + "node": "0.4.x", + "npm": "2.14.x" }, "scripts": { "test": "grunt test", "coveralls": "grunt karma:coveralls && rm -rf ./coverage" }, - "license": "Apache License", + "license": "Apache-2.0", "dependencies": { "grunt-jscs": "~1.5.x", + "grunt-sync": "^0.4.1", "karma-sinon": "^1.0.3", "lodash": "^2.4.1", - "sinon": "1.10.3" + "sinon": "1.16.1" } } diff --git a/packaging/deb/init.d/grafana-server b/packaging/deb/init.d/grafana-server index a4f6423a68d4f..9de4bf42092b4 100755 --- a/packaging/deb/init.d/grafana-server +++ b/packaging/deb/init.d/grafana-server @@ -36,6 +36,8 @@ MAX_OPEN_FILES=10000 PID_FILE=/var/run/$NAME.pid DAEMON=/usr/sbin/$NAME +umask 0027 + if [ `id -u` -ne 0 ]; then echo "You need root privileges to run this script" exit 4 diff --git a/packaging/deb/systemd/grafana-server.service b/packaging/deb/systemd/grafana-server.service index f99c96fcb139e..dd5d209714942 100644 --- a/packaging/deb/systemd/grafana-server.service +++ b/packaging/deb/systemd/grafana-server.service @@ -12,10 +12,12 @@ Type=simple WorkingDirectory=/usr/share/grafana ExecStart=/usr/sbin/grafana-server \ --config=${CONF_FILE} \ + --pidfile=${PID_FILE} \ cfg:default.paths.logs=${LOG_DIR} \ - cfg:default.paths.data=${DATA_DIR} \ + cfg:default.paths.data=${DATA_DIR} LimitNOFILE=10000 TimeoutStopSec=20 +UMask=0027 [Install] WantedBy=multi-user.target diff --git a/packaging/publish/publish.sh b/packaging/publish/publish.sh new file mode 100755 index 0000000000000..abccf74aa4bb9 --- /dev/null +++ b/packaging/publish/publish.sh @@ -0,0 +1,17 @@ +#! 
/usr/bin/env bash + +version=2.5.0 + +wget https://grafanarel.s3.amazonaws.com/builds/grafana_${version}_amd64.deb + +package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb +package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb +package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb +package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb + +wget https://grafanarel.s3.amazonaws.com/builds/grafana-${version}-1.x86_64.rpm + +package_cloud push grafana/testing/el/6 grafana-${version}-1.x86_64.rpm +package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm +package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm +package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm diff --git a/packaging/rpm/init.d/grafana-server b/packaging/rpm/init.d/grafana-server index 92e88673d748f..bb27882f625ac 100755 --- a/packaging/rpm/init.d/grafana-server +++ b/packaging/rpm/init.d/grafana-server @@ -148,7 +148,7 @@ case "$1" in $0 start ;; *) - echo -n "Usage: $0 {start|stop|restart|force-reload|status}" + echo "Usage: $0 {start|stop|restart|force-reload|status}" exit 3 ;; esac diff --git a/packaging/rpm/systemd/grafana-server.service b/packaging/rpm/systemd/grafana-server.service index 56a958032b749..fb2ec24d12361 100644 --- a/packaging/rpm/systemd/grafana-server.service +++ b/packaging/rpm/systemd/grafana-server.service @@ -12,8 +12,9 @@ Type=simple WorkingDirectory=/usr/share/grafana ExecStart=/usr/sbin/grafana-server \ --config=${CONF_FILE} \ + --pidfile=${PID_FILE} \ cfg:default.paths.logs=${LOG_DIR} \ - cfg:default.paths.data=${DATA_DIR} \ + cfg:default.paths.data=${DATA_DIR} LimitNOFILE=10000 TimeoutStopSec=20 diff --git a/pkg/api/admin_settings.go b/pkg/api/admin_settings.go index 06413d6a0b187..1f800cfe5585f 100644 --- a/pkg/api/admin_settings.go +++ b/pkg/api/admin_settings.go @@ -17,7 +17,7 @@ func AdminGetSettings(c *middleware.Context) { for _, key := range section.Keys() { keyName := key.Name() value := key.Value() - if strings.Contains(keyName, "secret") || strings.Contains(keyName, "password") || (strings.Contains(keyName, "provider_config") && strings.Contains(value, "@")) { + if strings.Contains(keyName, "secret") || strings.Contains(keyName, "password") || (strings.Contains(keyName, "provider_config")) { value = "************" } diff --git a/pkg/api/admin_users.go b/pkg/api/admin_users.go index a293032c200d9..7690630aaa2bd 100644 --- a/pkg/api/admin_users.go +++ b/pkg/api/admin_users.go @@ -37,7 +37,14 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) { metrics.M_Api_Admin_User_Create.Inc(1) - c.JsonOK("User created") + user := cmd.Result + + result := m.UserIdDTO{ + Message: "User created", + Id: user.Id, + } + + c.JSON(200, result) } func AdminUpdateUserPassword(c *middleware.Context, form dtos.AdminUpdateUserPasswordForm) { diff --git a/pkg/api/api.go b/pkg/api/api.go index fd4160f9993a1..3d1b28d7aa3a3 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -14,14 +14,16 @@ func Register(r *macaron.Macaron) { reqGrafanaAdmin := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true, ReqGrafanaAdmin: true}) reqEditorRole := middleware.RoleAuth(m.ROLE_EDITOR, m.ROLE_ADMIN) regOrgAdmin := middleware.RoleAuth(m.ROLE_ADMIN) + quota := middleware.Quota bind := binding.Bind // not logged in views r.Get("/", reqSignedIn, Index) r.Get("/logout", Logout) - r.Post("/login", bind(dtos.LoginCommand{}), wrap(LoginPost)) - r.Get("/login/:name", 
OAuthLogin) + r.Post("/login", quota("session"), bind(dtos.LoginCommand{}), wrap(LoginPost)) + r.Get("/login/:name", quota("session"), OAuthLogin) r.Get("/login", LoginView) + r.Get("/invite/:code", Index) // authed views r.Get("/profile/", reqSignedIn, Index) @@ -36,11 +38,21 @@ func Register(r *macaron.Macaron) { r.Get("/admin/users", reqGrafanaAdmin, Index) r.Get("/admin/users/create", reqGrafanaAdmin, Index) r.Get("/admin/users/edit/:id", reqGrafanaAdmin, Index) + r.Get("/admin/orgs", reqGrafanaAdmin, Index) + r.Get("/admin/orgs/edit/:id", reqGrafanaAdmin, Index) + r.Get("/dashboard/*", reqSignedIn, Index) + r.Get("/dashboard-solo/*", reqSignedIn, Index) // sign up r.Get("/signup", Index) - r.Post("/api/user/signup", bind(m.CreateUserCommand{}), wrap(SignUp)) + r.Get("/api/user/signup/options", wrap(GetSignUpOptions)) + r.Post("/api/user/signup", quota("user"), bind(dtos.SignUpForm{}), wrap(SignUp)) + r.Post("/api/user/signup/step2", bind(dtos.SignUpStep2Form{}), wrap(SignUpStep2)) + + // invited + r.Get("/api/user/invite/:code", wrap(GetInviteInfoByCode)) + r.Post("/api/user/invite/complete", bind(dtos.CompleteInviteForm{}), wrap(CompleteInvite)) // reset password r.Get("/user/password/send-reset-email", Index) @@ -57,7 +69,7 @@ func Register(r *macaron.Macaron) { r.Get("/api/snapshots-delete/:key", DeleteDashboardSnapshot) // api renew session based on remember cookie - r.Get("/api/login/ping", LoginApiPing) + r.Get("/api/login/ping", quota("session"), LoginApiPing) // authed api r.Group("/api", func() { @@ -71,6 +83,7 @@ func Register(r *macaron.Macaron) { r.Post("/stars/dashboard/:id", wrap(StarDashboard)) r.Delete("/stars/dashboard/:id", wrap(UnstarDashboard)) r.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword)) + r.Get("/quotas", wrap(GetUserQuotas)) }) // users (admin permission required) @@ -81,45 +94,61 @@ func Register(r *macaron.Macaron) { r.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser)) }, reqGrafanaAdmin) - // current org + // org information available to all users. 
r.Group("/org", func() { r.Get("/", wrap(GetOrgCurrent)) - r.Put("/", bind(m.UpdateOrgCommand{}), wrap(UpdateOrgCurrent)) - r.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg)) + r.Get("/quotas", wrap(GetOrgQuotas)) + }) + + // current org + r.Group("/org", func() { + r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent)) + r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent)) + r.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg)) r.Get("/users", wrap(GetOrgUsersForCurrentOrg)) r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg)) r.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg)) + + // invites + r.Get("/invites", wrap(GetPendingOrgInvites)) + r.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite)) + r.Patch("/invites/:code/revoke", wrap(RevokeInvite)) }, regOrgAdmin) // create new org - r.Post("/orgs", bind(m.CreateOrgCommand{}), wrap(CreateOrg)) + r.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg)) // search all orgs r.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs)) // orgs (admin routes) r.Group("/orgs/:orgId", func() { - r.Put("/", bind(m.UpdateOrgCommand{}), wrap(UpdateOrg)) + r.Get("/", wrap(GetOrgById)) + r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg)) + r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress)) + r.Delete("/", wrap(DeleteOrgById)) r.Get("/users", wrap(GetOrgUsers)) r.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser)) r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser)) r.Delete("/users/:userId", wrap(RemoveOrgUser)) + r.Get("/quotas", wrap(GetOrgQuotas)) + r.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota)) }, reqGrafanaAdmin) // auth api keys r.Group("/auth/keys", func() { r.Get("/", wrap(GetApiKeys)) - r.Post("/", bind(m.AddApiKeyCommand{}), wrap(AddApiKey)) + r.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey)) r.Delete("/:id", wrap(DeleteApiKey)) }, regOrgAdmin) // Data sources r.Group("/datasources", func() { r.Get("/", GetDataSources) - r.Post("/", bind(m.AddDataSourceCommand{}), AddDataSource) + r.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource) r.Put("/:id", bind(m.UpdateDataSourceCommand{}), UpdateDataSource) r.Delete("/:id", DeleteDataSource) - r.Get("/:id", GetDataSourceById) + r.Get("/:id", wrap(GetDataSourceById)) r.Get("/plugins", GetDataSourcePlugins) }, regOrgAdmin) @@ -141,6 +170,7 @@ func Register(r *macaron.Macaron) { // metrics r.Get("/metrics/test", GetTestMetrics) + }, reqSignedIn) // admin api @@ -150,6 +180,8 @@ func Register(r *macaron.Macaron) { r.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword) r.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions) r.Delete("/users/:id", AdminDeleteUser) + r.Get("/users/:id/quotas", wrap(GetUserQuotas)) + r.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota)) }, reqGrafanaAdmin) // rendering diff --git a/pkg/api/cloudwatch/cloudwatch.go b/pkg/api/cloudwatch/cloudwatch.go new file mode 100644 index 0000000000000..f4cde0ebd6ae0 --- /dev/null +++ b/pkg/api/cloudwatch/cloudwatch.go @@ -0,0 +1,184 @@ +package cloudwatch + +import ( + "encoding/json" + "errors" + "io/ioutil" + "time" + + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/grafana/grafana/pkg/middleware" + m "github.com/grafana/grafana/pkg/models" +) + +type actionHandler func(*cwRequest, *middleware.Context) + +var actionHandlers map[string]actionHandler + +type cwRequest struct { + Region string `json:"region"` + Action string `json:"action"` + Body []byte `json:"-"` + DataSource *m.DataSource +} + +func init() { + actionHandlers = map[string]actionHandler{ + "GetMetricStatistics": handleGetMetricStatistics, + "ListMetrics": handleListMetrics, + "DescribeInstances": handleDescribeInstances, + "__GetRegions": handleGetRegions, + "__GetNamespaces": handleGetNamespaces, + "__GetMetrics": handleGetMetrics, + "__GetDimensions": handleGetDimensions, + } +} + +func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) { + sess := session.New() + creds := credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: req.DataSource.Database}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute}, + }) + + cfg := &aws.Config{ + Region: aws.String(req.Region), + Credentials: creds, + } + + svc := cloudwatch.New(session.New(cfg), cfg) + + reqParam := &struct { + Parameters struct { + Namespace string `json:"namespace"` + MetricName string `json:"metricName"` + Dimensions []*cloudwatch.Dimension `json:"dimensions"` + Statistics []*string `json:"statistics"` + StartTime int64 `json:"startTime"` + EndTime int64 `json:"endTime"` + Period int64 `json:"period"` + } `json:"parameters"` + }{} + json.Unmarshal(req.Body, reqParam) + + params := &cloudwatch.GetMetricStatisticsInput{ + Namespace: aws.String(reqParam.Parameters.Namespace), + MetricName: aws.String(reqParam.Parameters.MetricName), + Dimensions: reqParam.Parameters.Dimensions, + Statistics: reqParam.Parameters.Statistics, + StartTime: aws.Time(time.Unix(reqParam.Parameters.StartTime, 0)), + EndTime: aws.Time(time.Unix(reqParam.Parameters.EndTime, 0)), + Period: aws.Int64(reqParam.Parameters.Period), + } + + resp, err := svc.GetMetricStatistics(params) + if err != nil { + c.JsonApiErr(500, "Unable to call AWS API", err) + return + } + + c.JSON(200, resp) +} + +func handleListMetrics(req *cwRequest, c *middleware.Context) { + sess := session.New() + creds := credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: req.DataSource.Database}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute}, + }) + + cfg := &aws.Config{ + Region: aws.String(req.Region), + Credentials: creds, + } + + svc := cloudwatch.New(session.New(cfg), cfg) + + reqParam := &struct { + Parameters struct { + Namespace string `json:"namespace"` + MetricName string `json:"metricName"` + Dimensions []*cloudwatch.DimensionFilter `json:"dimensions"` + } `json:"parameters"` + }{} + json.Unmarshal(req.Body, reqParam) + + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String(reqParam.Parameters.Namespace), + MetricName: aws.String(reqParam.Parameters.MetricName), + Dimensions: reqParam.Parameters.Dimensions, + } + + resp, err := svc.ListMetrics(params) + if err != nil { + 
c.JsonApiErr(500, "Unable to call AWS API", err) + return + } + + c.JSON(200, resp) +} + +func handleDescribeInstances(req *cwRequest, c *middleware.Context) { + sess := session.New() + creds := credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: req.DataSource.Database}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute}, + }) + + cfg := &aws.Config{ + Region: aws.String(req.Region), + Credentials: creds, + } + + svc := ec2.New(session.New(cfg), cfg) + + reqParam := &struct { + Parameters struct { + Filters []*ec2.Filter `json:"filters"` + InstanceIds []*string `json:"instanceIds"` + } `json:"parameters"` + }{} + json.Unmarshal(req.Body, reqParam) + + params := &ec2.DescribeInstancesInput{} + if len(reqParam.Parameters.Filters) > 0 { + params.Filters = reqParam.Parameters.Filters + } + if len(reqParam.Parameters.InstanceIds) > 0 { + params.InstanceIds = reqParam.Parameters.InstanceIds + } + + resp, err := svc.DescribeInstances(params) + if err != nil { + c.JsonApiErr(500, "Unable to call AWS API", err) + return + } + + c.JSON(200, resp) +} + +func HandleRequest(c *middleware.Context, ds *m.DataSource) { + var req cwRequest + req.Body, _ = ioutil.ReadAll(c.Req.Request.Body) + req.DataSource = ds + json.Unmarshal(req.Body, &req) + + if handler, found := actionHandlers[req.Action]; !found { + c.JsonApiErr(500, "Unexpected AWS Action", errors.New(req.Action)) + return + } else { + handler(&req, c) + } +} diff --git a/pkg/api/cloudwatch/metrics.go b/pkg/api/cloudwatch/metrics.go new file mode 100644 index 0000000000000..40cf974298d09 --- /dev/null +++ b/pkg/api/cloudwatch/metrics.go @@ -0,0 +1,146 @@ +package cloudwatch + +import ( + "encoding/json" + "sort" + + "github.com/grafana/grafana/pkg/middleware" + "github.com/grafana/grafana/pkg/util" +) + +var metricsMap map[string][]string +var dimensionsMap map[string][]string + +func init() { + metricsMap = map[string][]string{ + "AWS/AutoScaling": {"GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity", "GroupInServiceInstances", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"}, + "AWS/Billing": {"EstimatedCharges"}, + "AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"}, + "AWS/ECS": {"CPUUtilization", "MemoryUtilization"}, + "AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"}, + "AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"}, + "AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedItemCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"}, + "AWS/ElastiCache": { + "CPUUtilization", "SwapUsage", "FreeableMemory", "NetworkBytesIn", "NetworkBytesOut", + "BytesUsedForCacheItems", "BytesReadIntoMemcached", "BytesWrittenOutFromMemcached", "CasBadval", "CasHits", "CasMisses", "CmdFlush", "CmdGet", "CmdSet", "CurrConnections", "CurrItems", "DecrHits", 
"DecrMisses", "DeleteHits", "DeleteMisses", "Evictions", "GetHits", "GetMisses", "IncrHits", "IncrMisses", "Reclaimed", + "CurrConnections", "Evictions", "Reclaimed", "NewConnections", "BytesUsedForCache", "CacheHits", "CacheMisses", "ReplicationLag", "GetTypeCmds", "SetTypeCmds", "KeyBasedCmds", "StringBasedCmds", "HashBasedCmds", "ListBasedCmds", "SetBasedCmds", "SortedSetBasedCmds", "CurrItems", + }, + "AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps"}, + "AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"}, + "AWS/ElasticMapReduce": {"CoreNodesPending", "CoreNodesRunning", "HBaseBackupFailed", "HBaseMostRecentBackupDuration", "HBaseTimeSinceLastSuccessfulBackup", "HDFSBytesRead", "HDFSBytesWritten", "HDFSUtilization", "IsIdle", "JobsFailed", "JobsRunning", "LiveDataNodes", "LiveTaskTrackers", "MapSlotsOpen", "MissingBlocks", "ReduceSlotsOpen", "RemainingMapTasks", "RemainingMapTasksPerSlot", "RemainingReduceTasks", "RunningMapTasks", "RunningReduceTasks", "S3BytesRead", "S3BytesWritten", "TaskNodesPending", "TaskNodesRunning", "TotalLoad"}, + "AWS/Kinesis": {"PutRecord.Bytes", "PutRecord.Latency", "PutRecord.Success", "PutRecords.Bytes", "PutRecords.Latency", "PutRecords.Records", "PutRecords.Success", "IncomingBytes", "IncomingRecords", "GetRecords.Bytes", "GetRecords.IteratorAgeMilliseconds", "GetRecords.Latency", "GetRecords.Success"}, + "AWS/ML": {"PredictCount", "PredictFailureCount"}, + "AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"}, + "AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"}, + "AWS/RDS": {"BinLogDiskUsage", "CPUUtilization", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory", "FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "NetworkReceiveThroughput", "NetworkTransmitThroughput"}, + "AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy"}, + "AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"}, + "AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"}, + "AWS/S3": {"BucketSizeBytes", "NumberOfObjects"}, + "AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut"}, + "AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", 
"CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed", "CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"}, + "AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"}, + } + dimensionsMap = map[string][]string{ + "AWS/AutoScaling": {"AutoScalingGroupName"}, + "AWS/Billing": {"ServiceName", "LinkedAccount", "Currency"}, + "AWS/CloudFront": {"DistributionId", "Region"}, + "AWS/CloudSearch": {}, + "AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation"}, + "AWS/ElastiCache": {"CacheClusterId", "CacheNodeId"}, + "AWS/EBS": {"VolumeId"}, + "AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"}, + "AWS/ECS": {"ClusterName", "ServiceName"}, + "AWS/ELB": {"LoadBalancerName", "AvailabilityZone"}, + "AWS/ElasticMapReduce": {"ClusterId", "JobId"}, + "AWS/Kinesis": {"StreamName"}, + "AWS/ML": {"MLModelId", "RequestMode"}, + "AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"}, + "AWS/Redshift": {"NodeID", "ClusterIdentifier"}, + "AWS/RDS": {"DBInstanceIdentifier", "DatabaseClass", "EngineName"}, + "AWS/Route53": {"HealthCheckId"}, + "AWS/SNS": {"Application", "Platform", "TopicName"}, + "AWS/SQS": {"QueueName"}, + "AWS/S3": {"BucketName", "StorageType"}, + "AWS/SWF": {"Domain", "ActivityTypeName", "ActivityTypeVersion"}, + "AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"}, + "AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"}, + } +} + +// Whenever this list is updated, frontend list should also be updated. 
+// Please update the region list in public/app/plugins/datasource/cloudwatch/partials/config.html +func handleGetRegions(req *cwRequest, c *middleware.Context) { + regions := []string{ + "ap-northeast-1", "ap-southeast-1", "ap-southeast-2", "cn-north-1", + "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", + } + + result := []interface{}{} + for _, region := range regions { + result = append(result, util.DynMap{"text": region, "value": region}) + } + + c.JSON(200, result) +} + +func handleGetNamespaces(req *cwRequest, c *middleware.Context) { + keys := []string{} + for key := range metricsMap { + keys = append(keys, key) + } + sort.Sort(sort.StringSlice(keys)) + + result := []interface{}{} + for _, key := range keys { + result = append(result, util.DynMap{"text": key, "value": key}) + } + + c.JSON(200, result) +} + +func handleGetMetrics(req *cwRequest, c *middleware.Context) { + reqParam := &struct { + Parameters struct { + Namespace string `json:"namespace"` + } `json:"parameters"` + }{} + + json.Unmarshal(req.Body, reqParam) + + namespaceMetrics, exists := metricsMap[reqParam.Parameters.Namespace] + if !exists { + c.JsonApiErr(404, "Unable to find namespace "+reqParam.Parameters.Namespace, nil) + return + } + + result := []interface{}{} + for _, name := range namespaceMetrics { + result = append(result, util.DynMap{"text": name, "value": name}) + } + + c.JSON(200, result) +} + +func handleGetDimensions(req *cwRequest, c *middleware.Context) { + reqParam := &struct { + Parameters struct { + Namespace string `json:"namespace"` + } `json:"parameters"` + }{} + + json.Unmarshal(req.Body, reqParam) + + dimensionValues, exists := dimensionsMap[reqParam.Parameters.Namespace] + if !exists { + c.JsonApiErr(404, "Unable to find dimension "+reqParam.Parameters.Namespace, nil) + return + } + + result := []interface{}{} + for _, name := range dimensionValues { + result = append(result, util.DynMap{"text": name, "value": name}) + } + + c.JSON(200, result) +} diff --git a/pkg/api/dashboard.go b/pkg/api/dashboard.go index a10c3c92f96f8..6490a1188612f 100644 --- a/pkg/api/dashboard.go +++ b/pkg/api/dashboard.go @@ -56,7 +56,9 @@ func GetDashboard(c *middleware.Context) { Type: m.DashTypeDB, CanStar: c.IsSignedIn, CanSave: c.OrgRole == m.ROLE_ADMIN || c.OrgRole == m.ROLE_EDITOR, - CanEdit: c.OrgRole == m.ROLE_ADMIN || c.OrgRole == m.ROLE_EDITOR || c.OrgRole == m.ROLE_READ_ONLY_EDITOR, + CanEdit: canEditDashboard(c.OrgRole), + Created: dash.Created, + Updated: dash.Updated, }, } @@ -86,6 +88,19 @@ func DeleteDashboard(c *middleware.Context) { func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) { cmd.OrgId = c.OrgId + dash := cmd.GetDashboardModel() + if dash.Id == 0 { + limitReached, err := middleware.QuotaReached(c, "dashboard") + if err != nil { + c.JsonApiErr(500, "failed to get quota", err) + return + } + if limitReached { + c.JsonApiErr(403, "Quota reached", nil) + return + } + } + err := bus.Dispatch(&cmd) if err != nil { if err == m.ErrDashboardWithSameNameExists { @@ -109,6 +124,10 @@ func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) { c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version}) } +func canEditDashboard(role m.RoleType) bool { + return role == m.ROLE_ADMIN || role == m.ROLE_EDITOR || role == m.ROLE_READ_ONLY_EDITOR +} + func GetHomeDashboard(c *middleware.Context) { filePath := path.Join(setting.StaticRootPath, "dashboards/home.json") file, err := os.Open(filePath) @@ 
-119,6 +138,7 @@ func GetHomeDashboard(c *middleware.Context) { dash := dtos.DashboardFullWithMeta{} dash.Meta.IsHome = true + dash.Meta.CanEdit = canEditDashboard(c.OrgRole) jsonParser := json.NewDecoder(file) if err := jsonParser.Decode(&dash.Dashboard); err != nil { c.JsonApiErr(500, "Failed to load home dashboard", err) @@ -139,6 +159,7 @@ func GetDashboardFromJsonFile(c *middleware.Context) { dash := dtos.DashboardFullWithMeta{Dashboard: dashboard.Data} dash.Meta.Type = m.DashTypeJson + dash.Meta.CanEdit = canEditDashboard(c.OrgRole) c.JSON(200, &dash) } diff --git a/pkg/api/dataproxy.go b/pkg/api/dataproxy.go index 11075294b66d7..7193198155f73 100644 --- a/pkg/api/dataproxy.go +++ b/pkg/api/dataproxy.go @@ -8,9 +8,11 @@ import ( "net/url" "time" + "github.com/grafana/grafana/pkg/api/cloudwatch" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/util" ) @@ -24,51 +26,82 @@ var dataProxyTransport = &http.Transport{ TLSHandshakeTimeout: 10 * time.Second, } -func NewReverseProxy(ds *m.DataSource, proxyPath string) *httputil.ReverseProxy { - target, _ := url.Parse(ds.Url) - +func NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *httputil.ReverseProxy { director := func(req *http.Request) { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.Host = target.Host + req.URL.Scheme = targetUrl.Scheme + req.URL.Host = targetUrl.Host + req.Host = targetUrl.Host reqQueryVals := req.URL.Query() if ds.Type == m.DS_INFLUXDB_08 { - req.URL.Path = util.JoinUrlFragments(target.Path, "db/"+ds.Database+"/"+proxyPath) + req.URL.Path = util.JoinUrlFragments(targetUrl.Path, "db/"+ds.Database+"/"+proxyPath) reqQueryVals.Add("u", ds.User) reqQueryVals.Add("p", ds.Password) req.URL.RawQuery = reqQueryVals.Encode() } else if ds.Type == m.DS_INFLUXDB { - req.URL.Path = util.JoinUrlFragments(target.Path, proxyPath) + req.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath) reqQueryVals.Add("db", ds.Database) - reqQueryVals.Add("u", ds.User) - reqQueryVals.Add("p", ds.Password) req.URL.RawQuery = reqQueryVals.Encode() + if !ds.BasicAuth { + req.Header.Del("Authorization") + req.Header.Add("Authorization", util.GetBasicAuthHeader(ds.User, ds.Password)) + } } else { - req.URL.Path = util.JoinUrlFragments(target.Path, proxyPath) + req.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath) } if ds.BasicAuth { + req.Header.Del("Authorization") req.Header.Add("Authorization", util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword)) } + + // clear cookie headers + req.Header.Del("Cookie") + req.Header.Del("Set-Cookie") } return &httputil.ReverseProxy{Director: director} } -//ProxyDataSourceRequest TODO need to cache datasources -func ProxyDataSourceRequest(c *middleware.Context) { - id := c.ParamsInt64(":id") - query := m.GetDataSourceByIdQuery{Id: id, OrgId: c.OrgId} +var dsMap map[int64]*m.DataSource = make(map[int64]*m.DataSource) + +func getDatasource(id int64, orgId int64) (*m.DataSource, error) { + // ds, exists := dsMap[id] + // if exists && ds.OrgId == orgId { + // return ds, nil + // } + query := m.GetDataSourceByIdQuery{Id: id, OrgId: orgId} if err := bus.Dispatch(&query); err != nil { + return nil, err + } + + dsMap[id] = &query.Result + return &query.Result, nil +} + +func ProxyDataSourceRequest(c *middleware.Context) { + ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId) + if err != nil { 
c.JsonApiErr(500, "Unable to load datasource meta data", err) return } - proxyPath := c.Params("*") - proxy := NewReverseProxy(&query.Result, proxyPath) - proxy.Transport = dataProxyTransport - proxy.ServeHTTP(c.RW(), c.Req.Request) + targetUrl, _ := url.Parse(ds.Url) + if len(setting.DataProxyWhiteList) > 0 { + if _, exists := setting.DataProxyWhiteList[targetUrl.Host]; !exists { + c.JsonApiErr(403, "Data proxy hostname and ip are not included in whitelist", nil) + return + } + } + + if ds.Type == m.DS_CLOUDWATCH { + cloudwatch.HandleRequest(c, ds) + } else { + proxyPath := c.Params("*") + proxy := NewReverseProxy(ds, proxyPath, targetUrl) + proxy.Transport = dataProxyTransport + proxy.ServeHTTP(c.RW(), c.Req.Request) + } } diff --git a/pkg/api/dataproxy_test.go b/pkg/api/dataproxy_test.go index 842730891ae88..0e561c726e128 100644 --- a/pkg/api/dataproxy_test.go +++ b/pkg/api/dataproxy_test.go @@ -14,7 +14,8 @@ func TestDataSourceProxy(t *testing.T) { Convey("When getting graphite datasource proxy", t, func() { ds := m.DataSource{Url: "htttp://graphite:8080", Type: m.DS_GRAPHITE} - proxy := NewReverseProxy(&ds, "/render") + targetUrl, _ := url.Parse(ds.Url) + proxy := NewReverseProxy(&ds, "/render", targetUrl) requestUrl, _ := url.Parse("http://grafana.com/sub") req := http.Request{URL: requestUrl} @@ -36,7 +37,8 @@ func TestDataSourceProxy(t *testing.T) { Password: "password", } - proxy := NewReverseProxy(&ds, "") + targetUrl, _ := url.Parse(ds.Url) + proxy := NewReverseProxy(&ds, "", targetUrl) requestUrl, _ := url.Parse("http://grafana.com/sub") req := http.Request{URL: requestUrl} diff --git a/pkg/api/datasources.go b/pkg/api/datasources.go index e0253df3cdb48..38a4004c241ea 100644 --- a/pkg/api/datasources.go +++ b/pkg/api/datasources.go @@ -37,20 +37,22 @@ func GetDataSources(c *middleware.Context) { c.JSON(200, result) } -func GetDataSourceById(c *middleware.Context) { +func GetDataSourceById(c *middleware.Context) Response { query := m.GetDataSourceByIdQuery{ Id: c.ParamsInt64(":id"), OrgId: c.OrgId, } if err := bus.Dispatch(&query); err != nil { - c.JsonApiErr(500, "Failed to query datasources", err) - return + if err == m.ErrDataSourceNotFound { + return ApiError(404, "Data source not found", nil) + } + return ApiError(500, "Failed to query datasources", err) } ds := query.Result - c.JSON(200, &dtos.DataSource{ + return Json(200, &dtos.DataSource{ Id: ds.Id, OrgId: ds.OrgId, Name: ds.Name, @@ -112,5 +114,13 @@ func UpdateDataSource(c *middleware.Context, cmd m.UpdateDataSourceCommand) { } func GetDataSourcePlugins(c *middleware.Context) { - c.JSON(200, plugins.DataSources) + dsList := make(map[string]interface{}) + + for key, value := range plugins.DataSources { + if value.(map[string]interface{})["builtIn"] == nil { + dsList[key] = value + } + } + + c.JSON(200, dsList) } diff --git a/pkg/api/dtos/invite.go b/pkg/api/dtos/invite.go new file mode 100644 index 0000000000000..3f002a8b157e6 --- /dev/null +++ b/pkg/api/dtos/invite.go @@ -0,0 +1,26 @@ +package dtos + +import m "github.com/grafana/grafana/pkg/models" + +type AddInviteForm struct { + LoginOrEmail string `json:"loginOrEmail" binding:"Required"` + Name string `json:"name"` + Role m.RoleType `json:"role" binding:"Required"` + SkipEmails bool `json:"skipEmails"` +} + +type InviteInfo struct { + Email string `json:"email"` + Name string `json:"name"` + Username string `json:"username"` + InvitedBy string `json:"invitedBy"` +} + +type CompleteInviteForm struct { + InviteCode string `json:"inviteCode"` + Email string 
`json:"email" binding:"Required"` + Name string `json:"name"` + Username string `json:"username"` + Password string `json:"password"` + ConfirmPassword string `json:"confirmPassword"` +} diff --git a/pkg/api/dtos/models.go b/pkg/api/dtos/models.go index 3e1826f56fbfe..7af4c84f56dca 100644 --- a/pkg/api/dtos/models.go +++ b/pkg/api/dtos/models.go @@ -17,6 +17,7 @@ type LoginCommand struct { type CurrentUser struct { IsSignedIn bool `json:"isSignedIn"` + Id int64 `json:"id"` Login string `json:"login"` Email string `json:"email"` Name string `json:"name"` @@ -39,6 +40,7 @@ type DashboardMeta struct { Slug string `json:"slug"` Expires time.Time `json:"expires"` Created time.Time `json:"created"` + Updated time.Time `json:"updated"` } type DashboardFullWithMeta struct { diff --git a/pkg/api/dtos/org.go b/pkg/api/dtos/org.go new file mode 100644 index 0000000000000..b4bb26ca78a37 --- /dev/null +++ b/pkg/api/dtos/org.go @@ -0,0 +1,14 @@ +package dtos + +type UpdateOrgForm struct { + Name string `json:"name" binding:"Required"` +} + +type UpdateOrgAddressForm struct { + Address1 string `json:"address1"` + Address2 string `json:"address2"` + City string `json:"city"` + ZipCode string `json:"zipcode"` + State string `json:"state"` + Country string `json:"country"` +} diff --git a/pkg/api/dtos/user.go b/pkg/api/dtos/user.go index 9b407535429ee..dbbe24a159bfb 100644 --- a/pkg/api/dtos/user.go +++ b/pkg/api/dtos/user.go @@ -1,5 +1,18 @@ package dtos +type SignUpForm struct { + Email string `json:"email" binding:"Required"` +} + +type SignUpStep2Form struct { + Email string `json:"email"` + Name string `json:"name"` + Username string `json:"username"` + Password string `json:"password"` + Code string `json:"code"` + OrgName string `json:"orgName"` +} + type AdminCreateUserForm struct { Email string `json:"email"` Login string `json:"login"` diff --git a/pkg/api/frontendsettings.go b/pkg/api/frontendsettings.go index 7851f1d8f0d19..cc07b9cfb4968 100644 --- a/pkg/api/frontendsettings.go +++ b/pkg/api/frontendsettings.go @@ -81,18 +81,29 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro dsMap["index"] = ds.Database } + if ds.Type == m.DS_PROMETHEUS { + // add unproxied server URL for link to Prometheus web UI + dsMap["directUrl"] = ds.Url + } + datasources[ds.Name] = dsMap } // add grafana backend data source grafanaDatasourceMeta, _ := plugins.DataSources["grafana"] - datasources["grafana"] = map[string]interface{}{ + datasources["-- Grafana --"] = map[string]interface{}{ "type": "grafana", "meta": grafanaDatasourceMeta, } + // add mixed backend data source + datasources["-- Mixed --"] = map[string]interface{}{ + "type": "mixed", + "meta": plugins.DataSources["mixed"], + } + if defaultDatasource == "" { - defaultDatasource = "grafana" + defaultDatasource = "-- Grafana --" } jsonObj := map[string]interface{}{ diff --git a/pkg/api/index.go b/pkg/api/index.go index 8f486c4b785ee..556db006b2f2c 100644 --- a/pkg/api/index.go +++ b/pkg/api/index.go @@ -13,6 +13,7 @@ func setIndexViewData(c *middleware.Context) error { } currentUser := &dtos.CurrentUser{ + Id: c.UserId, IsSignedIn: c.IsSignedIn, Login: c.Login, Email: c.Email, @@ -47,6 +48,10 @@ func setIndexViewData(c *middleware.Context) error { c.Data["GoogleAnalyticsId"] = setting.GoogleAnalyticsId } + if setting.GoogleTagManagerId != "" { + c.Data["GoogleTagManagerId"] = setting.GoogleTagManagerId + } + return nil } diff --git a/pkg/api/login_oauth.go b/pkg/api/login_oauth.go index 796599df86473..4244feef66482 100644 --- 
a/pkg/api/login_oauth.go +++ b/pkg/api/login_oauth.go @@ -74,7 +74,15 @@ func OAuthLogin(ctx *middleware.Context) { ctx.Redirect(setting.AppSubUrl + "/login") return } - + limitReached, err := middleware.QuotaReached(ctx, "user") + if err != nil { + ctx.Handle(500, "Failed to get user quota", err) + return + } + if limitReached { + ctx.Redirect(setting.AppSubUrl + "/login") + return + } cmd := m.CreateUserCommand{ Login: userInfo.Email, Email: userInfo.Email, diff --git a/pkg/api/org.go b/pkg/api/org.go index 746281c51388e..6ed4a7c4a14fa 100644 --- a/pkg/api/org.go +++ b/pkg/api/org.go @@ -1,6 +1,7 @@ package api import ( + "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" @@ -30,22 +31,34 @@ func getOrgHelper(orgId int64) Response { return ApiError(500, "Failed to get organization", err) } - org := m.OrgDTO{ - Id: query.Result.Id, - Name: query.Result.Name, + org := query.Result + result := m.OrgDetailsDTO{ + Id: org.Id, + Name: org.Name, + Address: m.Address{ + Address1: org.Address1, + Address2: org.Address2, + City: org.City, + ZipCode: org.ZipCode, + State: org.State, + Country: org.Country, + }, } - return Json(200, &org) + return Json(200, &result) } // POST /api/orgs func CreateOrg(c *middleware.Context, cmd m.CreateOrgCommand) Response { if !c.IsSignedIn || (!setting.AllowUserOrgCreate && !c.IsGrafanaAdmin) { - return ApiError(401, "Access denied", nil) + return ApiError(403, "Access denied", nil) } cmd.UserId = c.UserId if err := bus.Dispatch(&cmd); err != nil { + if err == m.ErrOrgNameTaken { + return ApiError(400, "Organization name taken", err) + } return ApiError(500, "Failed to create organization", err) } @@ -58,25 +71,65 @@ func CreateOrg(c *middleware.Context, cmd m.CreateOrgCommand) Response { } // PUT /api/org -func UpdateOrgCurrent(c *middleware.Context, cmd m.UpdateOrgCommand) Response { - cmd.OrgId = c.OrgId - return updateOrgHelper(cmd) +func UpdateOrgCurrent(c *middleware.Context, form dtos.UpdateOrgForm) Response { + return updateOrgHelper(form, c.OrgId) } // PUT /api/orgs/:orgId -func UpdateOrg(c *middleware.Context, cmd m.UpdateOrgCommand) Response { - cmd.OrgId = c.ParamsInt64(":orgId") - return updateOrgHelper(cmd) +func UpdateOrg(c *middleware.Context, form dtos.UpdateOrgForm) Response { + return updateOrgHelper(form, c.ParamsInt64(":orgId")) } -func updateOrgHelper(cmd m.UpdateOrgCommand) Response { +func updateOrgHelper(form dtos.UpdateOrgForm, orgId int64) Response { + cmd := m.UpdateOrgCommand{Name: form.Name, OrgId: orgId} if err := bus.Dispatch(&cmd); err != nil { + if err == m.ErrOrgNameTaken { + return ApiError(400, "Organization name taken", err) + } return ApiError(500, "Failed to update organization", err) } return ApiSuccess("Organization updated") } +// PUT /api/org/address +func UpdateOrgAddressCurrent(c *middleware.Context, form dtos.UpdateOrgAddressForm) Response { + return updateOrgAddressHelper(form, c.OrgId) +} + +// PUT /api/orgs/:orgId/address +func UpdateOrgAddress(c *middleware.Context, form dtos.UpdateOrgAddressForm) Response { + return updateOrgAddressHelper(form, c.ParamsInt64(":orgId")) +} + +func updateOrgAddressHelper(form dtos.UpdateOrgAddressForm, orgId int64) Response { + cmd := m.UpdateOrgAddressCommand{ + OrgId: orgId, + Address: m.Address{ + Address1: form.Address1, + Address2: form.Address2, + City: form.City, + State: form.State, + ZipCode: form.ZipCode, + Country: form.Country, + }, + } + + if err := 
bus.Dispatch(&cmd); err != nil { + return ApiError(500, "Failed to update org address", err) + } + + return ApiSuccess("Address updated") +} + +// GET /api/orgs/:orgId +func DeleteOrgById(c *middleware.Context) Response { + if err := bus.Dispatch(&m.DeleteOrgCommand{Id: c.ParamsInt64(":orgId")}); err != nil { + return ApiError(500, "Failed to update organization", err) + } + return ApiSuccess("Organization deleted") +} + func SearchOrgs(c *middleware.Context) Response { query := m.SearchOrgsQuery{ Query: c.Query("query"), diff --git a/pkg/api/org_invite.go b/pkg/api/org_invite.go new file mode 100644 index 0000000000000..8d916677ed27f --- /dev/null +++ b/pkg/api/org_invite.go @@ -0,0 +1,219 @@ +package api + +import ( + "fmt" + + "github.com/grafana/grafana/pkg/api/dtos" + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/events" + "github.com/grafana/grafana/pkg/metrics" + "github.com/grafana/grafana/pkg/middleware" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" + "github.com/grafana/grafana/pkg/util" +) + +func GetPendingOrgInvites(c *middleware.Context) Response { + query := m.GetTempUsersQuery{OrgId: c.OrgId, Status: m.TmpUserInvitePending} + + if err := bus.Dispatch(&query); err != nil { + return ApiError(500, "Failed to get invites from db", err) + } + + for _, invite := range query.Result { + invite.Url = setting.ToAbsUrl("invite/" + invite.Code) + } + + return Json(200, query.Result) +} + +func AddOrgInvite(c *middleware.Context, inviteDto dtos.AddInviteForm) Response { + if !inviteDto.Role.IsValid() { + return ApiError(400, "Invalid role specified", nil) + } + + // first try get existing user + userQuery := m.GetUserByLoginQuery{LoginOrEmail: inviteDto.LoginOrEmail} + if err := bus.Dispatch(&userQuery); err != nil { + if err != m.ErrUserNotFound { + return ApiError(500, "Failed to query db for existing user check", err) + } + } else { + return inviteExistingUserToOrg(c, userQuery.Result, &inviteDto) + } + + cmd := m.CreateTempUserCommand{} + cmd.OrgId = c.OrgId + cmd.Email = inviteDto.LoginOrEmail + cmd.Name = inviteDto.Name + cmd.Status = m.TmpUserInvitePending + cmd.InvitedByUserId = c.UserId + cmd.Code = util.GetRandomString(30) + cmd.Role = inviteDto.Role + cmd.RemoteAddr = c.Req.RemoteAddr + + if err := bus.Dispatch(&cmd); err != nil { + return ApiError(500, "Failed to save invite to database", err) + } + + // send invite email + if !inviteDto.SkipEmails && util.IsEmail(inviteDto.LoginOrEmail) { + emailCmd := m.SendEmailCommand{ + To: []string{inviteDto.LoginOrEmail}, + Template: "new_user_invite.html", + Data: map[string]interface{}{ + "Name": util.StringsFallback2(cmd.Name, cmd.Email), + "OrgName": c.OrgName, + "Email": c.Email, + "LinkUrl": setting.ToAbsUrl("invite/" + cmd.Code), + "InvitedBy": util.StringsFallback3(c.Name, c.Email, c.Login), + }, + } + + if err := bus.Dispatch(&emailCmd); err != nil { + return ApiError(500, "Failed to send email invite", err) + } + + return ApiSuccess(fmt.Sprintf("Sent invite to %s", inviteDto.LoginOrEmail)) + } + + return ApiSuccess(fmt.Sprintf("Created invite for %s", inviteDto.LoginOrEmail)) +} + +func inviteExistingUserToOrg(c *middleware.Context, user *m.User, inviteDto *dtos.AddInviteForm) Response { + // user exists, add org role + createOrgUserCmd := m.AddOrgUserCommand{OrgId: c.OrgId, UserId: user.Id, Role: inviteDto.Role} + if err := bus.Dispatch(&createOrgUserCmd); err != nil { + if err == m.ErrOrgUserAlreadyAdded { + return ApiError(412, fmt.Sprintf("User %s is 
already added to organization", inviteDto.LoginOrEmail), err) + } + return ApiError(500, "Error while trying to create org user", err) + } else { + + if !inviteDto.SkipEmails && util.IsEmail(user.Email) { + emailCmd := m.SendEmailCommand{ + To: []string{user.Email}, + Template: "invited_to_org.html", + Data: map[string]interface{}{ + "Name": user.NameOrFallback(), + "OrgName": c.OrgName, + "InvitedBy": util.StringsFallback3(c.Name, c.Email, c.Login), + }, + } + + if err := bus.Dispatch(&emailCmd); err != nil { + return ApiError(500, "Failed to send email invited_to_org", err) + } + } + + return ApiSuccess(fmt.Sprintf("Existing Grafana user %s added to org %s", user.NameOrFallback(), c.OrgName)) + } +} + +func RevokeInvite(c *middleware.Context) Response { + if ok, rsp := updateTempUserStatus(c.Params(":code"), m.TmpUserRevoked); !ok { + return rsp + } + + return ApiSuccess("Invite revoked") +} + +func GetInviteInfoByCode(c *middleware.Context) Response { + query := m.GetTempUserByCodeQuery{Code: c.Params(":code")} + + if err := bus.Dispatch(&query); err != nil { + if err == m.ErrTempUserNotFound { + return ApiError(404, "Invite not found", nil) + } + return ApiError(500, "Failed to get invite", err) + } + + invite := query.Result + + return Json(200, dtos.InviteInfo{ + Email: invite.Email, + Name: invite.Name, + Username: invite.Email, + InvitedBy: util.StringsFallback3(invite.InvitedByName, invite.InvitedByLogin, invite.InvitedByEmail), + }) +} + +func CompleteInvite(c *middleware.Context, completeInvite dtos.CompleteInviteForm) Response { + query := m.GetTempUserByCodeQuery{Code: completeInvite.InviteCode} + + if err := bus.Dispatch(&query); err != nil { + if err == m.ErrTempUserNotFound { + return ApiError(404, "Invite not found", nil) + } + return ApiError(500, "Failed to get invite", err) + } + + invite := query.Result + if invite.Status != m.TmpUserInvitePending { + return ApiError(412, fmt.Sprintf("Invite cannot be used in status %s", invite.Status), nil) + } + + cmd := m.CreateUserCommand{ + Email: completeInvite.Email, + Name: completeInvite.Name, + Login: completeInvite.Username, + Password: completeInvite.Password, + SkipOrgSetup: true, + } + + if err := bus.Dispatch(&cmd); err != nil { + return ApiError(500, "failed to create user", err) + } + + user := &cmd.Result + + bus.Publish(&events.SignUpCompleted{ + Name: user.NameOrFallback(), + Email: user.Email, + }) + + if ok, rsp := applyUserInvite(user, invite, true); !ok { + return rsp + } + + loginUserWithUser(user, c) + + metrics.M_Api_User_SignUpCompleted.Inc(1) + metrics.M_Api_User_SignUpInvite.Inc(1) + + return ApiSuccess("User created and logged in") +} + +func updateTempUserStatus(code string, status m.TempUserStatus) (bool, Response) { + // update temp user status + updateTmpUserCmd := m.UpdateTempUserStatusCommand{Code: code, Status: status} + if err := bus.Dispatch(&updateTmpUserCmd); err != nil { + return false, ApiError(500, "Failed to update invite status", err) + } + + return true, nil +} + +func applyUserInvite(user *m.User, invite *m.TempUserDTO, setActive bool) (bool, Response) { + // add to org + addOrgUserCmd := m.AddOrgUserCommand{OrgId: invite.OrgId, UserId: user.Id, Role: invite.Role} + if err := bus.Dispatch(&addOrgUserCmd); err != nil { + if err != m.ErrOrgUserAlreadyAdded { + return false, ApiError(500, "Error while trying to create org user", err) + } + } + + // update temp user status + if ok, rsp := updateTempUserStatus(invite.Code, m.TmpUserCompleted); !ok { + return false, rsp + } + + if setActive { 
+ // set org to active + if err := bus.Dispatch(&m.SetUsingOrgCommand{OrgId: invite.OrgId, UserId: user.Id}); err != nil { + return false, ApiError(500, "Failed to set org as active", err) + } + } + + return true, nil +} diff --git a/pkg/api/quota.go b/pkg/api/quota.go new file mode 100644 index 0000000000000..d858543543075 --- /dev/null +++ b/pkg/api/quota.go @@ -0,0 +1,68 @@ +package api + +import ( + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/middleware" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" +) + +func GetOrgQuotas(c *middleware.Context) Response { + if !setting.Quota.Enabled { + return ApiError(404, "Quotas not enabled", nil) + } + query := m.GetOrgQuotasQuery{OrgId: c.ParamsInt64(":orgId")} + + if err := bus.Dispatch(&query); err != nil { + return ApiError(500, "Failed to get org quotas", err) + } + + return Json(200, query.Result) +} + +func UpdateOrgQuota(c *middleware.Context, cmd m.UpdateOrgQuotaCmd) Response { + if !setting.Quota.Enabled { + return ApiError(404, "Quotas not enabled", nil) + } + cmd.OrgId = c.ParamsInt64(":orgId") + cmd.Target = c.Params(":target") + + if _, ok := setting.Quota.Org.ToMap()[cmd.Target]; !ok { + return ApiError(404, "Invalid quota target", nil) + } + + if err := bus.Dispatch(&cmd); err != nil { + return ApiError(500, "Failed to update org quotas", err) + } + return ApiSuccess("Organization quota updated") +} + +func GetUserQuotas(c *middleware.Context) Response { + if !setting.Quota.Enabled { + return ApiError(404, "Quotas not enabled", nil) + } + query := m.GetUserQuotasQuery{UserId: c.ParamsInt64(":id")} + + if err := bus.Dispatch(&query); err != nil { + return ApiError(500, "Failed to get org quotas", err) + } + + return Json(200, query.Result) +} + +func UpdateUserQuota(c *middleware.Context, cmd m.UpdateUserQuotaCmd) Response { + if !setting.Quota.Enabled { + return ApiError(404, "Quotas not enabled", nil) + } + cmd.UserId = c.ParamsInt64(":id") + cmd.Target = c.Params(":target") + + if _, ok := setting.Quota.User.ToMap()[cmd.Target]; !ok { + return ApiError(404, "Invalid quota target", nil) + } + + if err := bus.Dispatch(&cmd); err != nil { + return ApiError(500, "Failed to update org quotas", err) + } + return ApiSuccess("Organization quota updated") +} diff --git a/pkg/api/signup.go b/pkg/api/signup.go index 77305caba70bf..767b98011543d 100644 --- a/pkg/api/signup.go +++ b/pkg/api/signup.go @@ -1,38 +1,135 @@ package api import ( + "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/events" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" + "github.com/grafana/grafana/pkg/util" ) +// GET /api/user/signup/options +func GetSignUpOptions(c *middleware.Context) Response { + return Json(200, util.DynMap{ + "verifyEmailEnabled": setting.VerifyEmailEnabled, + "autoAssignOrg": setting.AutoAssignOrg, + }) +} + // POST /api/user/signup -func SignUp(c *middleware.Context, cmd m.CreateUserCommand) Response { +func SignUp(c *middleware.Context, form dtos.SignUpForm) Response { if !setting.AllowUserSignUp { return ApiError(401, "User signup is disabled", nil) } - cmd.Login = cmd.Email + existing := m.GetUserByLoginQuery{LoginOrEmail: form.Email} + if err := bus.Dispatch(&existing); err == nil { + return ApiError(422, "User with same email address already exists", nil) + } + + cmd := 
m.CreateTempUserCommand{} + cmd.OrgId = -1 + cmd.Email = form.Email + cmd.Status = m.TmpUserSignUpStarted + cmd.InvitedByUserId = c.UserId + cmd.Code = util.GetRandomString(20) + cmd.RemoteAddr = c.Req.RemoteAddr if err := bus.Dispatch(&cmd); err != nil { - return ApiError(500, "failed to create user", err) + return ApiError(500, "Failed to create signup", err) } - user := cmd.Result + bus.Publish(&events.SignUpStarted{ + Email: form.Email, + Code: cmd.Code, + }) + + metrics.M_Api_User_SignUpStarted.Inc(1) + + return Json(200, util.DynMap{"status": "SignUpCreated"}) +} + +func SignUpStep2(c *middleware.Context, form dtos.SignUpStep2Form) Response { + if !setting.AllowUserSignUp { + return ApiError(401, "User signup is disabled", nil) + } + + createUserCmd := m.CreateUserCommand{ + Email: form.Email, + Login: form.Username, + Name: form.Name, + Password: form.Password, + OrgName: form.OrgName, + } + + // verify email + if setting.VerifyEmailEnabled { + if ok, rsp := verifyUserSignUpEmail(form.Email, form.Code); !ok { + return rsp + } + createUserCmd.EmailVerified = true + } - bus.Publish(&events.UserSignedUp{ - Id: user.Id, - Name: user.Name, + // check if user exists + existing := m.GetUserByLoginQuery{LoginOrEmail: form.Email} + if err := bus.Dispatch(&existing); err == nil { + return ApiError(401, "User with same email address already exists", nil) + } + + // dispatch create command + if err := bus.Dispatch(&createUserCmd); err != nil { + return ApiError(500, "Failed to create user", err) + } + + // publish signup event + user := &createUserCmd.Result + bus.Publish(&events.SignUpCompleted{ Email: user.Email, - Login: user.Login, + Name: user.NameOrFallback(), }) - loginUserWithUser(&user, c) + // mark temp user as completed + if ok, rsp := updateTempUserStatus(form.Code, m.TmpUserCompleted); !ok { + return rsp + } + + // check for pending invites + invitesQuery := m.GetTempUsersQuery{Email: form.Email, Status: m.TmpUserInvitePending} + if err := bus.Dispatch(&invitesQuery); err != nil { + return ApiError(500, "Failed to query database for invites", err) + } + + apiResponse := util.DynMap{"message": "User sign up completed succesfully", "code": "redirect-to-landing-page"} + for _, invite := range invitesQuery.Result { + if ok, rsp := applyUserInvite(user, invite, false); !ok { + return rsp + } + apiResponse["code"] = "redirect-to-select-org" + } + + loginUserWithUser(user, c) + metrics.M_Api_User_SignUpCompleted.Inc(1) + + return Json(200, apiResponse) +} - metrics.M_Api_User_SignUp.Inc(1) +func verifyUserSignUpEmail(email string, code string) (bool, Response) { + query := m.GetTempUserByCodeQuery{Code: code} + + if err := bus.Dispatch(&query); err != nil { + if err == m.ErrTempUserNotFound { + return false, ApiError(404, "Invalid email verification code", nil) + } + return false, ApiError(500, "Failed to read temp user", err) + } + + tempUser := query.Result + if tempUser.Email != email { + return false, ApiError(404, "Email verification code does not match email", nil) + } - return ApiSuccess("User created and logged in") + return true, nil } diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index c94661a5f9ad0..69843b6b09504 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -33,7 +33,7 @@ func newMacaron() *macaron.Macaron { mapStatic(m, "css", "css") mapStatic(m, "img", "img") mapStatic(m, "fonts", "fonts") - mapStatic(m, "robots.txt", "robots.txxt") + mapStatic(m, "robots.txt", "robots.txt") m.Use(macaron.Renderer(macaron.RenderOptions{ Directory: path.Join(setting.StaticRootPath, 
"views"), diff --git a/pkg/events/events.go b/pkg/events/events.go index 5e82578b47480..235de9c31304c 100644 --- a/pkg/events/events.go +++ b/pkg/events/events.go @@ -70,11 +70,15 @@ type UserCreated struct { Email string `json:"email"` } -type UserSignedUp struct { +type SignUpStarted struct { + Timestamp time.Time `json:"timestamp"` + Email string `json:"email"` + Code string `json:"code"` +} + +type SignUpCompleted struct { Timestamp time.Time `json:"timestamp"` - Id int64 `json:"id"` Name string `json:"name"` - Login string `json:"login"` Email string `json:"email"` } diff --git a/pkg/log/console.go b/pkg/log/console.go index 2a6bbf6a3bbee..c290cdf45f406 100644 --- a/pkg/log/console.go +++ b/pkg/log/console.go @@ -45,15 +45,17 @@ var ( // ConsoleWriter implements LoggerInterface and writes messages to terminal. type ConsoleWriter struct { - lg *log.Logger - Level int `json:"level"` + lg *log.Logger + Level int `json:"level"` + Formatting bool `json:"formatting"` } // create ConsoleWriter returning as LoggerInterface. func NewConsole() LoggerInterface { return &ConsoleWriter{ - lg: log.New(os.Stderr, "", log.Ldate|log.Ltime), - Level: TRACE, + lg: log.New(os.Stderr, "", log.Ldate|log.Ltime), + Level: TRACE, + Formatting: true, } } @@ -65,7 +67,7 @@ func (cw *ConsoleWriter) WriteMsg(msg string, skip, level int) error { if cw.Level > level { return nil } - if runtime.GOOS == "windows" { + if runtime.GOOS == "windows" || !cw.Formatting { cw.lg.Println(msg) } else { cw.lg.Println(colors[level](msg)) diff --git a/pkg/log/file.go b/pkg/log/file.go index e9402815f0b06..8f005d9723b9e 100644 --- a/pkg/log/file.go +++ b/pkg/log/file.go @@ -144,7 +144,7 @@ func (w *FileLogWriter) WriteMsg(msg string, skip, level int) error { func (w *FileLogWriter) createLogFile() (*os.File, error) { // Open the log file - return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) + return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) } func (w *FileLogWriter) initFd() error { diff --git a/pkg/log/log.go b/pkg/log/log.go index d2e31cffb64c9..a7d26ce7913ee 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -82,7 +82,11 @@ func Fatal(skip int, format string, v ...interface{}) { func Close() { for _, l := range loggers { l.Close() + // delete the logger. + l = nil } + // clear the loggers slice. 
+ loggers = nil } // .___ __ _____ diff --git a/pkg/login/ldap.go b/pkg/login/ldap.go index abc3c6a587a59..355b4fd100a97 100644 --- a/pkg/login/ldap.go +++ b/pkg/login/ldap.go @@ -2,8 +2,10 @@ package login import ( "crypto/tls" + "crypto/x509" "errors" "fmt" + "io/ioutil" "strings" "github.com/davecgh/go-spew/spew" @@ -14,8 +16,9 @@ import ( ) type ldapAuther struct { - server *LdapServerConf - conn *ldap.Conn + server *LdapServerConf + conn *ldap.Conn + requireSecondBind bool } func NewLdapAuthenticator(server *LdapServerConf) *ldapAuther { @@ -23,18 +26,37 @@ func NewLdapAuthenticator(server *LdapServerConf) *ldapAuther { } func (a *ldapAuther) Dial() error { - address := fmt.Sprintf("%s:%d", a.server.Host, a.server.Port) var err error - if a.server.UseSSL { - tlsCfg := &tls.Config{ - InsecureSkipVerify: a.server.SkipVerifySSL, - ServerName: a.server.Host, + var certPool *x509.CertPool + if a.server.RootCACert != "" { + certPool := x509.NewCertPool() + for _, caCertFile := range strings.Split(a.server.RootCACert, " ") { + if pem, err := ioutil.ReadFile(caCertFile); err != nil { + return err + } else { + if !certPool.AppendCertsFromPEM(pem) { + return errors.New("Failed to append CA certficate " + caCertFile) + } + } } - a.conn, err = ldap.DialTLS("tcp", address, tlsCfg) - } else { - a.conn, err = ldap.Dial("tcp", address) } + for _, host := range strings.Split(a.server.Host, " ") { + address := fmt.Sprintf("%s:%d", host, a.server.Port) + if a.server.UseSSL { + tlsCfg := &tls.Config{ + InsecureSkipVerify: a.server.SkipVerifySSL, + ServerName: host, + RootCAs: certPool, + } + a.conn, err = ldap.DialTLS("tcp", address, tlsCfg) + } else { + a.conn, err = ldap.Dial("tcp", address) + } + if err == nil { + return nil + } + } return err } @@ -58,7 +80,7 @@ func (a *ldapAuther) login(query *LoginUserQuery) error { } // check if a second user bind is needed - if a.server.BindPassword != "" { + if a.requireSecondBind { if err := a.secondBind(ldapUser, query.Password); err != nil { return err } @@ -67,6 +89,10 @@ func (a *ldapAuther) login(query *LoginUserQuery) error { if grafanaUser, err := a.getGrafanaUserFor(ldapUser); err != nil { return err } else { + // sync user details + if err := a.syncUserInfo(grafanaUser, ldapUser); err != nil { + return err + } // sync org roles if err := a.syncOrgRoles(grafanaUser, ldapUser); err != nil { return err @@ -85,11 +111,12 @@ func (a *ldapAuther) getGrafanaUserFor(ldapUser *ldapUserInfo) (*m.User, error) for _, ldapGroup := range a.server.LdapGroups { if ldapUser.isMemberOf(ldapGroup.GroupDN) { access = true + break } } if !access { - log.Info("Ldap Auth: user %s does not belong in any of the specified ldap groups", ldapUser.Username) + log.Info("Ldap Auth: user %s does not belong in any of the specified ldap groups, ldapUser groups: %v", ldapUser.Username, ldapUser.MemberOf) return nil, ErrInvalidCredentials } @@ -120,6 +147,21 @@ func (a *ldapAuther) createGrafanaUser(ldapUser *ldapUserInfo) (*m.User, error) return &cmd.Result, nil } +func (a *ldapAuther) syncUserInfo(user *m.User, ldapUser *ldapUserInfo) error { + var name = fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName) + if user.Email == ldapUser.Email && user.Name == name { + return nil + } + + log.Info("Ldap: Syncing user info %s", ldapUser.Username) + updateCmd := m.UpdateUserCommand{} + updateCmd.UserId = user.Id + updateCmd.Login = user.Login + updateCmd.Email = ldapUser.Email + updateCmd.Name = fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName) + return 
bus.Dispatch(&updateCmd) +} + func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error { if len(a.server.LdapGroups) == 0 { return nil @@ -130,9 +172,12 @@ func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error { return err } + handledOrgIds := map[int64]bool{} + // update or remove org roles for _, org := range orgsQuery.Result { match := false + handledOrgIds[org.OrgId] = true for _, group := range a.server.LdapGroups { if org.OrgId != group.OrgId { @@ -168,20 +213,18 @@ func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error { continue } - match := false - for _, org := range orgsQuery.Result { - if group.OrgId == org.OrgId { - match = true - } + if _, exists := handledOrgIds[group.OrgId]; exists { + continue } - if !match { - // add role - cmd := m.AddOrgUserCommand{UserId: user.Id, Role: group.OrgRole, OrgId: group.OrgId} - if err := bus.Dispatch(&cmd); err != nil { - return err - } + // add role + cmd := m.AddOrgUserCommand{UserId: user.Id, Role: group.OrgRole, OrgId: group.OrgId} + if err := bus.Dispatch(&cmd); err != nil { + return err } + + // mark this group has handled so we do not process it again + handledOrgIds[group.OrgId] = true } return nil @@ -189,6 +232,10 @@ func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error { func (a *ldapAuther) secondBind(ldapUser *ldapUserInfo, userPassword string) error { if err := a.conn.Bind(ldapUser.DN, userPassword); err != nil { + if ldapCfg.VerboseLogging { + log.Info("LDAP second bind failed, %v", err) + } + if ldapErr, ok := err.(*ldap.Error); ok { if ldapErr.ResultCode == 49 { return ErrInvalidCredentials @@ -201,8 +248,9 @@ func (a *ldapAuther) secondBind(ldapUser *ldapUserInfo, userPassword string) err } func (a *ldapAuther) initialBind(username, userPassword string) error { - if a.server.BindPassword != "" { + if a.server.BindPassword != "" || a.server.BindDN == "" { userPassword = a.server.BindPassword + a.requireSecondBind = true } bindPath := a.server.BindDN @@ -211,6 +259,10 @@ func (a *ldapAuther) initialBind(username, userPassword string) error { } if err := a.conn.Bind(bindPath, userPassword); err != nil { + if ldapCfg.VerboseLogging { + log.Info("LDAP initial bind failed, %v", err) + } + if ldapErr, ok := err.(*ldap.Error); ok { if ldapErr.ResultCode == 49 { return ErrInvalidCredentials @@ -238,7 +290,7 @@ func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) { a.server.Attr.Name, a.server.Attr.MemberOf, }, - Filter: fmt.Sprintf(a.server.SearchFilter, username), + Filter: strings.Replace(a.server.SearchFilter, "%s", username, -1), } searchResult, err = a.conn.Search(&searchReq) @@ -259,18 +311,56 @@ func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) { return nil, errors.New("Ldap search matched more than one entry, please review your filter setting") } + var memberOf []string + if a.server.GroupSearchFilter == "" { + memberOf = getLdapAttrArray(a.server.Attr.MemberOf, searchResult) + } else { + // If we are using a POSIX LDAP schema it won't support memberOf, so we manually search the groups + var groupSearchResult *ldap.SearchResult + for _, groupSearchBase := range a.server.GroupSearchBaseDNs { + filter := strings.Replace(a.server.GroupSearchFilter, "%s", username, -1) + + if ldapCfg.VerboseLogging { + log.Info("LDAP: Searching for user's groups: %s", filter) + } + + groupSearchReq := ldap.SearchRequest{ + BaseDN: groupSearchBase, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: 
ldap.NeverDerefAliases, + Attributes: []string{ + // Here MemberOf would be the thing that identifies the group, which is normally 'cn' + a.server.Attr.MemberOf, + }, + Filter: filter, + } + + groupSearchResult, err = a.conn.Search(&groupSearchReq) + if err != nil { + return nil, err + } + + if len(groupSearchResult.Entries) > 0 { + for i := range groupSearchResult.Entries { + memberOf = append(memberOf, getLdapAttrN(a.server.Attr.MemberOf, groupSearchResult, i)) + } + break + } + } + } + return &ldapUserInfo{ DN: searchResult.Entries[0].DN, LastName: getLdapAttr(a.server.Attr.Surname, searchResult), FirstName: getLdapAttr(a.server.Attr.Name, searchResult), Username: getLdapAttr(a.server.Attr.Username, searchResult), Email: getLdapAttr(a.server.Attr.Email, searchResult), - MemberOf: getLdapAttrArray(a.server.Attr.MemberOf, searchResult), + MemberOf: memberOf, }, nil } -func getLdapAttr(name string, result *ldap.SearchResult) string { - for _, attr := range result.Entries[0].Attributes { +func getLdapAttrN(name string, result *ldap.SearchResult, n int) string { + for _, attr := range result.Entries[n].Attributes { if attr.Name == name { if len(attr.Values) > 0 { return attr.Values[0] @@ -280,6 +370,10 @@ func getLdapAttr(name string, result *ldap.SearchResult) string { return "" } +func getLdapAttr(name string, result *ldap.SearchResult) string { + return getLdapAttrN(name, result, 0) +} + func getLdapAttrArray(name string, result *ldap.SearchResult) []string { for _, attr := range result.Entries[0].Attributes { if attr.Name == name { diff --git a/pkg/login/ldap_test.go b/pkg/login/ldap_test.go index 6713d0ca3fe10..2e4ed3a32e19c 100644 --- a/pkg/login/ldap_test.go +++ b/pkg/login/ldap_test.go @@ -54,7 +54,9 @@ func TestLdapAuther(t *testing.T) { ldapAutherScenario("Given no existing grafana user", func(sc *scenarioContext) { ldapAuther := NewLdapAuthenticator(&LdapServerConf{ LdapGroups: []*LdapGroupToOrgRole{ - {GroupDN: "cn=users", OrgRole: "Admin"}, + {GroupDN: "cn=admin", OrgRole: "Admin"}, + {GroupDN: "cn=editor", OrgRole: "Editor"}, + {GroupDN: "*", OrgRole: "Viewer"}, }, }) @@ -63,7 +65,7 @@ func TestLdapAuther(t *testing.T) { result, err := ldapAuther.getGrafanaUserFor(&ldapUserInfo{ Username: "torkelo", Email: "my@email.com", - MemberOf: []string{"cn=users"}, + MemberOf: []string{"cn=editor"}, }) So(err, ShouldBeNil) @@ -178,6 +180,25 @@ func TestLdapAuther(t *testing.T) { }) }) + ldapAutherScenario("given multiple matching ldap groups and no existing groups", func(sc *scenarioContext) { + ldapAuther := NewLdapAuthenticator(&LdapServerConf{ + LdapGroups: []*LdapGroupToOrgRole{ + {GroupDN: "cn=admins", OrgId: 1, OrgRole: "Admin"}, + {GroupDN: "*", OrgId: 1, OrgRole: "Viewer"}, + }, + }) + + sc.userOrgsQueryReturns([]*m.UserOrgDTO{}) + err := ldapAuther.syncOrgRoles(&m.User{}, &ldapUserInfo{ + MemberOf: []string{"cn=admins"}, + }) + + Convey("Should take first match, and ignore subsequent matches", func() { + So(err, ShouldBeNil) + So(sc.addOrgUserCmd.Role, ShouldEqual, m.ROLE_ADMIN) + }) + }) + }) } diff --git a/pkg/login/settings.go b/pkg/login/settings.go index 0a50df33c194f..b181dac3281bc 100644 --- a/pkg/login/settings.go +++ b/pkg/login/settings.go @@ -19,6 +19,7 @@ type LdapServerConf struct { Port int `toml:"port"` UseSSL bool `toml:"use_ssl"` SkipVerifySSL bool `toml:"ssl_skip_verify"` + RootCACert string `toml:"root_ca_cert"` BindDN string `toml:"bind_dn"` BindPassword string `toml:"bind_password"` Attr LdapAttributeMap `toml:"attributes"` @@ -26,6 +27,9 @@ type 
LdapServerConf struct { SearchFilter string `toml:"search_filter"` SearchBaseDNs []string `toml:"search_base_dns"` + GroupSearchFilter string `toml:"group_search_filter"` + GroupSearchBaseDNs []string `toml:"group_search_base_dns"` + LdapGroups []*LdapGroupToOrgRole `toml:"group_mappings"` } @@ -63,8 +67,6 @@ func loadLdapConfig() { // set default org id for _, server := range ldapCfg.Servers { - assertNotEmptyCfg(server.Host, "host") - assertNotEmptyCfg(server.BindDN, "bind_dn") assertNotEmptyCfg(server.SearchFilter, "search_filter") assertNotEmptyCfg(server.SearchBaseDNs, "search_base_dns") diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index f6dab8c8043db..8e10b2428b49d 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -13,13 +13,15 @@ var ( M_Api_Status_500 = NewComboCounterRef("api.status.500") M_Api_Status_404 = NewComboCounterRef("api.status.404") - M_Api_User_SignUp = NewComboCounterRef("api.user.signup") - M_Api_Dashboard_Get = NewComboCounterRef("api.dashboard.get") - M_Api_Dashboard_Post = NewComboCounterRef("api.dashboard.post") - M_Api_Admin_User_Create = NewComboCounterRef("api.admin.user_create") - M_Api_Login_Post = NewComboCounterRef("api.login.post") - M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth") - M_Api_Org_Create = NewComboCounterRef("api.org.create") + M_Api_User_SignUpStarted = NewComboCounterRef("api.user.signup_started") + M_Api_User_SignUpCompleted = NewComboCounterRef("api.user.signup_completed") + M_Api_User_SignUpInvite = NewComboCounterRef("api.user.signup_invite") + M_Api_Dashboard_Get = NewComboCounterRef("api.dashboard.get") + M_Api_Dashboard_Post = NewComboCounterRef("api.dashboard.post") + M_Api_Admin_User_Create = NewComboCounterRef("api.admin.user_create") + M_Api_Login_Post = NewComboCounterRef("api.login.post") + M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth") + M_Api_Org_Create = NewComboCounterRef("api.org.create") M_Api_Dashboard_Snapshot_Create = NewComboCounterRef("api.dashboard_snapshot.create") M_Api_Dashboard_Snapshot_External = NewComboCounterRef("api.dashboard_snapshot.external") diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index c8848fb53711a..abda18d12b7b5 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -36,12 +36,6 @@ func sendUsageStats() { "metrics": metrics, } - statsQuery := m.GetSystemStatsQuery{} - if err := bus.Dispatch(&statsQuery); err != nil { - log.Error(3, "Failed to get system stats", err) - return - } - UsageStats.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: @@ -52,11 +46,36 @@ func sendUsageStats() { } }) + statsQuery := m.GetSystemStatsQuery{} + if err := bus.Dispatch(&statsQuery); err != nil { + log.Error(3, "Failed to get system stats", err) + return + } + metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount metrics["stats.users.count"] = statsQuery.Result.UserCount metrics["stats.orgs.count"] = statsQuery.Result.OrgCount - out, _ := json.Marshal(report) + dsStats := m.GetDataSourceStatsQuery{} + if err := bus.Dispatch(&dsStats); err != nil { + log.Error(3, "Failed to get datasource stats", err) + return + } + + // send counters for each data source + // but ignore any custom data sources + // as sending that name could be sensitive information + dsOtherCount := 0 + for _, dsStat := range dsStats.Result { + if m.IsKnownDataSourcePlugin(dsStat.Type) { + metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count + } else { + dsOtherCount += dsStat.Count + } + } + 
metrics["stats.ds.other.count"] = dsOtherCount + + out, _ := json.MarshalIndent(report, "", " ") data := bytes.NewBuffer(out) client := http.Client{Timeout: time.Duration(5 * time.Second)} diff --git a/pkg/middleware/auth.go b/pkg/middleware/auth.go index 9fb09a5c395d5..2497183b3562b 100644 --- a/pkg/middleware/auth.go +++ b/pkg/middleware/auth.go @@ -36,9 +36,19 @@ func getApiKey(c *Context) string { return "" } -func authDenied(c *Context) { +func accessForbidden(c *Context) { if c.IsApiRequest() { - c.JsonApiErr(401, "Access denied", nil) + c.JsonApiErr(403, "Permission denied", nil) + return + } + + c.SetCookie("redirect_to", url.QueryEscape(setting.AppSubUrl+c.Req.RequestURI), 0, setting.AppSubUrl+"/") + c.Redirect(setting.AppSubUrl + "/login") +} + +func notAuthorized(c *Context) { + if c.IsApiRequest() { + c.JsonApiErr(401, "Unauthorized", nil) return } @@ -56,20 +66,20 @@ func RoleAuth(roles ...m.RoleType) macaron.Handler { } } if !ok { - authDenied(c) + accessForbidden(c) } } } func Auth(options *AuthOptions) macaron.Handler { return func(c *Context) { - if !c.IsGrafanaAdmin && options.ReqGrafanaAdmin { - authDenied(c) + if !c.IsSignedIn && options.ReqSignedIn && !c.AllowAnonymous { + notAuthorized(c) return } - if !c.IsSignedIn && options.ReqSignedIn && !c.AllowAnonymous { - authDenied(c) + if !c.IsGrafanaAdmin && options.ReqGrafanaAdmin { + accessForbidden(c) return } } diff --git a/pkg/middleware/auth_proxy.go b/pkg/middleware/auth_proxy.go index 2529fef67a938..d5218b09ddaa9 100644 --- a/pkg/middleware/auth_proxy.go +++ b/pkg/middleware/auth_proxy.go @@ -2,6 +2,7 @@ package middleware import ( "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/log" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" ) @@ -39,8 +40,16 @@ func initContextWithAuthProxy(ctx *Context) bool { } } + // initialize session + if err := ctx.Session.Start(ctx); err != nil { + log.Error(3, "Failed to start session", err) + return false + } + ctx.SignedInUser = query.Result ctx.IsSignedIn = true + ctx.Session.Set(SESS_KEY_USERID, ctx.UserId) + return true } diff --git a/pkg/middleware/logger.go b/pkg/middleware/logger.go index 88c8eaede1568..eb5c7b8dde4e9 100644 --- a/pkg/middleware/logger.go +++ b/pkg/middleware/logger.go @@ -18,20 +18,13 @@ package middleware import ( "fmt" "net/http" - "runtime" "time" "github.com/Unknwon/macaron" "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/setting" ) -var isWindows bool - -func init() { - isWindows = runtime.GOOS == "windows" -} - -// Logger returns a middleware handler that logs the request as it goes in and the response as it goes out. 
func Logger() macaron.Handler { return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) { start := time.Now() @@ -40,20 +33,19 @@ func Logger() macaron.Handler { c.Next() content := fmt.Sprintf("Completed %s %v %s in %v", req.URL.Path, rw.Status(), http.StatusText(rw.Status()), time.Since(start)) - if !isWindows { - switch rw.Status() { - case 200: - content = fmt.Sprintf("\033[1;32m%s\033[0m", content) - return - case 304: - //content = fmt.Sprintf("\033[1;33m%s\033[0m", content) + + switch rw.Status() { + case 200, 304: + content = fmt.Sprintf("%s", content) + if !setting.RouterLogging { return - case 404: - content = fmt.Sprintf("\033[1;31m%s\033[0m", content) - case 500: - content = fmt.Sprintf("\033[1;36m%s\033[0m", content) } + case 404: + content = fmt.Sprintf("%s", content) + case 500: + content = fmt.Sprintf("%s", content) } + log.Info(content) } } diff --git a/pkg/middleware/quota.go b/pkg/middleware/quota.go new file mode 100644 index 0000000000000..f6ba74d77df54 --- /dev/null +++ b/pkg/middleware/quota.go @@ -0,0 +1,106 @@ +package middleware + +import ( + "fmt" + + "github.com/Unknwon/macaron" + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/log" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" +) + +func Quota(target string) macaron.Handler { + return func(c *Context) { + limitReached, err := QuotaReached(c, target) + if err != nil { + c.JsonApiErr(500, "failed to get quota", err) + return + } + if limitReached { + c.JsonApiErr(403, fmt.Sprintf("%s Quota reached", target), nil) + return + } + } +} + +func QuotaReached(c *Context, target string) (bool, error) { + if !setting.Quota.Enabled { + return false, nil + } + + // get the list of scopes that this target is valid for. 
Org, User, Global + scopes, err := m.GetQuotaScopes(target) + if err != nil { + return false, err + } + + log.Debug(fmt.Sprintf("checking quota for %s in scopes %v", target, scopes)) + + for _, scope := range scopes { + log.Debug(fmt.Sprintf("checking scope %s", scope.Name)) + + switch scope.Name { + case "global": + if scope.DefaultLimit < 0 { + continue + } + if scope.DefaultLimit == 0 { + return true, nil + } + if target == "session" { + usedSessions := getSessionCount() + if int64(usedSessions) > scope.DefaultLimit { + log.Debug(fmt.Sprintf("%d sessions active, limit is %d", usedSessions, scope.DefaultLimit)) + return true, nil + } + continue + } + query := m.GetGlobalQuotaByTargetQuery{Target: scope.Target} + if err := bus.Dispatch(&query); err != nil { + return true, err + } + if query.Result.Used >= scope.DefaultLimit { + return true, nil + } + case "org": + if !c.IsSignedIn { + continue + } + query := m.GetOrgQuotaByTargetQuery{OrgId: c.OrgId, Target: scope.Target, Default: scope.DefaultLimit} + if err := bus.Dispatch(&query); err != nil { + return true, err + } + if query.Result.Limit < 0 { + continue + } + if query.Result.Limit == 0 { + return true, nil + } + + if query.Result.Used >= query.Result.Limit { + return true, nil + } + case "user": + if !c.IsSignedIn || c.UserId == 0 { + continue + } + query := m.GetUserQuotaByTargetQuery{UserId: c.UserId, Target: scope.Target, Default: scope.DefaultLimit} + if err := bus.Dispatch(&query); err != nil { + return true, err + } + if query.Result.Limit < 0 { + continue + } + if query.Result.Limit == 0 { + return true, nil + } + + if query.Result.Used >= query.Result.Limit { + return true, nil + } + } + } + + return false, nil +} diff --git a/pkg/middleware/quota_test.go b/pkg/middleware/quota_test.go new file mode 100644 index 0000000000000..b68aa485fa7b0 --- /dev/null +++ b/pkg/middleware/quota_test.go @@ -0,0 +1,150 @@ +package middleware + +import ( + "testing" + + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestMiddlewareQuota(t *testing.T) { + + Convey("Given the grafana quota middleware", t, func() { + getSessionCount = func() int { + return 4 + } + + setting.AnonymousEnabled = false + setting.Quota = setting.QuotaSettings{ + Enabled: true, + Org: &setting.OrgQuota{ + User: 5, + Dashboard: 5, + DataSource: 5, + ApiKey: 5, + }, + User: &setting.UserQuota{ + Org: 5, + }, + Global: &setting.GlobalQuota{ + Org: 5, + User: 5, + Dashboard: 5, + DataSource: 5, + ApiKey: 5, + Session: 5, + }, + } + + middlewareScenario("with user not logged in", func(sc *scenarioContext) { + bus.AddHandler("globalQuota", func(query *m.GetGlobalQuotaByTargetQuery) error { + query.Result = &m.GlobalQuotaDTO{ + Target: query.Target, + Limit: query.Default, + Used: 4, + } + return nil + }) + Convey("global quota not reached", func() { + sc.m.Get("/user", Quota("user"), sc.defaultHandler) + sc.fakeReq("GET", "/user").exec() + So(sc.resp.Code, ShouldEqual, 200) + }) + Convey("global quota reached", func() { + setting.Quota.Global.User = 4 + sc.m.Get("/user", Quota("user"), sc.defaultHandler) + sc.fakeReq("GET", "/user").exec() + So(sc.resp.Code, ShouldEqual, 403) + }) + Convey("global session quota not reached", func() { + setting.Quota.Global.Session = 10 + sc.m.Get("/user", Quota("session"), sc.defaultHandler) + sc.fakeReq("GET", "/user").exec() + So(sc.resp.Code, ShouldEqual, 200) + }) + Convey("global session quota reached", func() { + setting.Quota.Global.Session = 1 + sc.m.Get("/user", Quota("session"), sc.defaultHandler) + sc.fakeReq("GET", "/user").exec() + So(sc.resp.Code, ShouldEqual, 403) + }) + }) + + middlewareScenario("with user logged in", func(sc *scenarioContext) { + // log us in, so we have a user_id and org_id in the context + sc.fakeReq("GET", "/").handler(func(c *Context) { + c.Session.Set(SESS_KEY_USERID, int64(12)) + }).exec() + + bus.AddHandler("test", func(query *m.GetSignedInUserQuery) error { + query.Result = &m.SignedInUser{OrgId: 2, UserId: 12} + return nil + }) + bus.AddHandler("globalQuota", func(query *m.GetGlobalQuotaByTargetQuery) error { + query.Result = &m.GlobalQuotaDTO{ + Target: query.Target, + Limit: query.Default, + Used: 4, + } + return nil + }) + bus.AddHandler("userQuota", func(query *m.GetUserQuotaByTargetQuery) error { + query.Result = &m.UserQuotaDTO{ + Target: query.Target, + Limit: query.Default, + Used: 4, + } + return nil + }) + bus.AddHandler("orgQuota", func(query *m.GetOrgQuotaByTargetQuery) error { + query.Result = &m.OrgQuotaDTO{ + Target: query.Target, + Limit: query.Default, + Used: 4, + } + return nil + }) + Convey("global datasource quota reached", func() { + setting.Quota.Global.DataSource = 4 + sc.m.Get("/ds", Quota("data_source"), sc.defaultHandler) + sc.fakeReq("GET", "/ds").exec() + So(sc.resp.Code, ShouldEqual, 403) + }) + Convey("user Org quota not reached", func() { + setting.Quota.User.Org = 5 + sc.m.Get("/org", Quota("org"), sc.defaultHandler) + sc.fakeReq("GET", "/org").exec() + So(sc.resp.Code, ShouldEqual, 200) + }) + Convey("user Org quota reached", func() { + setting.Quota.User.Org = 4 + sc.m.Get("/org", Quota("org"), sc.defaultHandler) + sc.fakeReq("GET", "/org").exec() + So(sc.resp.Code, ShouldEqual, 403) + }) + Convey("org dashboard quota not reached", func() { + setting.Quota.Org.Dashboard = 10 + sc.m.Get("/dashboard", Quota("dashboard"), sc.defaultHandler) + sc.fakeReq("GET", "/dashboard").exec() + So(sc.resp.Code, ShouldEqual, 200) + }) + Convey("org dashboard quota reached", func() { + 
setting.Quota.Org.Dashboard = 4 + sc.m.Get("/dashboard", Quota("dashboard"), sc.defaultHandler) + sc.fakeReq("GET", "/dashboard").exec() + So(sc.resp.Code, ShouldEqual, 403) + }) + Convey("org dashboard quota reached but quotas disabled", func() { + setting.Quota.Org.Dashboard = 4 + setting.Quota.Enabled = false + sc.m.Get("/dashboard", Quota("dashboard"), sc.defaultHandler) + sc.fakeReq("GET", "/dashboard").exec() + So(sc.resp.Code, ShouldEqual, 200) + }) + + }) + + }) +} diff --git a/pkg/middleware/session.go b/pkg/middleware/session.go index 7b036b9790eaf..fc1512e5bd63b 100644 --- a/pkg/middleware/session.go +++ b/pkg/middleware/session.go @@ -18,12 +18,16 @@ const ( var sessionManager *session.Manager var sessionOptions *session.Options var startSessionGC func() +var getSessionCount func() int func init() { startSessionGC = func() { sessionManager.GC() time.AfterFunc(time.Duration(sessionOptions.Gclifetime)*time.Second, startSessionGC) } + getSessionCount = func() int { + return sessionManager.Count() + } } func prepareOptions(opt *session.Options) *session.Options { diff --git a/pkg/models/address.go b/pkg/models/address.go new file mode 100644 index 0000000000000..0cf8aaf7ddb91 --- /dev/null +++ b/pkg/models/address.go @@ -0,0 +1,10 @@ +package models + +type Address struct { + Address1 string `json:"address1"` + Address2 string `json:"address2"` + City string `json:"city"` + ZipCode string `json:"zipCode"` + State string `json:"state"` + Country string `json:"country"` +} diff --git a/pkg/models/dashboards.go b/pkg/models/dashboards.go index 7d4a26905563d..5b926c3e31441 100644 --- a/pkg/models/dashboards.go +++ b/pkg/models/dashboards.go @@ -43,6 +43,8 @@ func NewDashboard(title string) *Dashboard { dash.Data = make(map[string]interface{}) dash.Data["title"] = title dash.Title = title + dash.Created = time.Now() + dash.Updated = time.Now() dash.UpdateSlug() return dash } @@ -73,9 +75,12 @@ func NewDashboardFromJson(data map[string]interface{}) *Dashboard { if dash.Data["version"] != nil { dash.Version = int(dash.Data["version"].(float64)) + dash.Updated = time.Now() } } else { dash.Data["version"] = 0 + dash.Created = time.Now() + dash.Updated = time.Now() } return dash diff --git a/pkg/models/datasource.go b/pkg/models/datasource.go index c756faaba59a5..38273598ab146 100644 --- a/pkg/models/datasource.go +++ b/pkg/models/datasource.go @@ -11,6 +11,9 @@ const ( DS_INFLUXDB_08 = "influxdb_08" DS_ES = "elasticsearch" DS_OPENTSDB = "opentsdb" + DS_CLOUDWATCH = "cloudwatch" + DS_KAIROSDB = "kairosdb" + DS_PROMETHEUS = "prometheus" DS_ACCESS_DIRECT = "direct" DS_ACCESS_PROXY = "proxy" ) @@ -44,6 +47,27 @@ type DataSource struct { Updated time.Time } +var knownDatasourcePlugins map[string]bool = map[string]bool{ + DS_ES: true, + DS_GRAPHITE: true, + DS_INFLUXDB: true, + DS_INFLUXDB_08: true, + DS_KAIROSDB: true, + DS_CLOUDWATCH: true, + DS_PROMETHEUS: true, + DS_OPENTSDB: true, + "opennms": true, + "druid": true, + "dalmatinerdb": true, + "gnocci": true, + "zabbix": true, +} + +func IsKnownDataSourcePlugin(dsType string) bool { + _, exists := knownDatasourcePlugins[dsType] + return exists +} + // ---------------------- // COMMANDS diff --git a/pkg/models/org.go b/pkg/models/org.go index b2d18be953747..2a9fa8efa04ab 100644 --- a/pkg/models/org.go +++ b/pkg/models/org.go @@ -7,13 +7,22 @@ import ( // Typed errors var ( - ErrOrgNotFound = errors.New("Organization not found") + ErrOrgNotFound = errors.New("Organization not found") + ErrOrgNameTaken = errors.New("Organization name is taken") ) 
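The Quota middleware and QuotaReached helper introduced above are meant to be attached per route; the target string picks which quota (user, org, dashboard, data_source, api_key, session) is checked against the global, org, and user scopes before the real handler runs. A minimal, hypothetical wiring example; the route path and AddDataSource handler are illustrative assumptions, not taken from this diff:

    // Responds 403 "data_source Quota reached" once the configured limit is hit,
    // otherwise the request falls through to the handler.
    r.Post("/api/datasources", middleware.Quota("data_source"), AddDataSource)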
type Org struct { Id int64 Version int Name string + + Address1 string + Address2 string + City string + ZipCode string + State string + Country string + Created time.Time Updated time.Time } @@ -34,8 +43,13 @@ type DeleteOrgCommand struct { } type UpdateOrgCommand struct { - Name string `json:"name" binding:"Required"` - OrgId int64 `json:"-"` + Name string + OrgId int64 +} + +type UpdateOrgAddressCommand struct { + OrgId int64 + Address } type GetOrgByIdQuery struct { @@ -62,6 +76,12 @@ type OrgDTO struct { Name string `json:"name"` } +type OrgDetailsDTO struct { + Id int64 `json:"id"` + Name string `json:"name"` + Address Address `json:"address"` +} + type UserOrgDTO struct { OrgId int64 `json:"orgId"` Name string `json:"name"` diff --git a/pkg/models/org_user.go b/pkg/models/org_user.go index afbb10386c8aa..f456fa9537379 100644 --- a/pkg/models/org_user.go +++ b/pkg/models/org_user.go @@ -7,9 +7,10 @@ import ( // Typed errors var ( - ErrInvalidRoleType = errors.New("Invalid role type") - ErrLastOrgAdmin = errors.New("Cannot remove last organization admin") - ErrOrgUserNotFound = errors.New("Cannot find the organization user") + ErrInvalidRoleType = errors.New("Invalid role type") + ErrLastOrgAdmin = errors.New("Cannot remove last organization admin") + ErrOrgUserNotFound = errors.New("Cannot find the organization user") + ErrOrgUserAlreadyAdded = errors.New("User is already added to organization") ) type RoleType string diff --git a/pkg/models/quotas.go b/pkg/models/quotas.go new file mode 100644 index 0000000000000..85159d830d1d9 --- /dev/null +++ b/pkg/models/quotas.go @@ -0,0 +1,130 @@ +package models + +import ( + "errors" + "github.com/grafana/grafana/pkg/setting" + "time" +) + +var ErrInvalidQuotaTarget = errors.New("Invalid quota target") + +type Quota struct { + Id int64 + OrgId int64 + UserId int64 + Target string + Limit int64 + Created time.Time + Updated time.Time +} + +type QuotaScope struct { + Name string + Target string + DefaultLimit int64 +} + +type OrgQuotaDTO struct { + OrgId int64 `json:"org_id"` + Target string `json:"target"` + Limit int64 `json:"limit"` + Used int64 `json:"used"` +} + +type UserQuotaDTO struct { + UserId int64 `json:"user_id"` + Target string `json:"target"` + Limit int64 `json:"limit"` + Used int64 `json:"used"` +} + +type GlobalQuotaDTO struct { + Target string `json:"target"` + Limit int64 `json:"limit"` + Used int64 `json:"used"` +} + +type GetOrgQuotaByTargetQuery struct { + Target string + OrgId int64 + Default int64 + Result *OrgQuotaDTO +} + +type GetOrgQuotasQuery struct { + OrgId int64 + Result []*OrgQuotaDTO +} + +type GetUserQuotaByTargetQuery struct { + Target string + UserId int64 + Default int64 + Result *UserQuotaDTO +} + +type GetUserQuotasQuery struct { + UserId int64 + Result []*UserQuotaDTO +} + +type GetGlobalQuotaByTargetQuery struct { + Target string + Default int64 + Result *GlobalQuotaDTO +} + +type UpdateOrgQuotaCmd struct { + Target string `json:"target"` + Limit int64 `json:"limit"` + OrgId int64 `json:"-"` +} + +type UpdateUserQuotaCmd struct { + Target string `json:"target"` + Limit int64 `json:"limit"` + UserId int64 `json:"-"` +} + +func GetQuotaScopes(target string) ([]QuotaScope, error) { + scopes := make([]QuotaScope, 0) + switch target { + case "user": + scopes = append(scopes, + QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.User}, + QuotaScope{Name: "org", Target: "org_user", DefaultLimit: setting.Quota.Org.User}, + ) + return scopes, nil + case "org": + scopes = append(scopes, 
+ QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.Org}, + QuotaScope{Name: "user", Target: "org_user", DefaultLimit: setting.Quota.User.Org}, + ) + return scopes, nil + case "dashboard": + scopes = append(scopes, + QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.Dashboard}, + QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.Dashboard}, + ) + return scopes, nil + case "data_source": + scopes = append(scopes, + QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.DataSource}, + QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.DataSource}, + ) + return scopes, nil + case "api_key": + scopes = append(scopes, + QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.ApiKey}, + QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.ApiKey}, + ) + return scopes, nil + case "session": + scopes = append(scopes, + QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.Session}, + ) + return scopes, nil + default: + return scopes, ErrInvalidQuotaTarget + } +} diff --git a/pkg/models/stats.go b/pkg/models/stats.go index 0d83882e66652..6a060137ac7ed 100644 --- a/pkg/models/stats.go +++ b/pkg/models/stats.go @@ -6,6 +6,15 @@ type SystemStats struct { OrgCount int } +type DataSourceStats struct { + Count int + Type string +} + type GetSystemStatsQuery struct { Result *SystemStats } + +type GetDataSourceStatsQuery struct { + Result []*DataSourceStats +} diff --git a/pkg/models/temp_user.go b/pkg/models/temp_user.go new file mode 100644 index 0000000000000..00c496c484457 --- /dev/null +++ b/pkg/models/temp_user.go @@ -0,0 +1,92 @@ +package models + +import ( + "errors" + "time" +) + +// Typed errors +var ( + ErrTempUserNotFound = errors.New("User not found") +) + +type TempUserStatus string + +const ( + TmpUserSignUpStarted TempUserStatus = "SignUpStarted" + TmpUserInvitePending TempUserStatus = "InvitePending" + TmpUserCompleted TempUserStatus = "Completed" + TmpUserRevoked TempUserStatus = "Revoked" +) + +// TempUser holds data for org invites and unconfirmed sign ups +type TempUser struct { + Id int64 + OrgId int64 + Version int + Email string + Name string + Role RoleType + InvitedByUserId int64 + Status TempUserStatus + + EmailSent bool + EmailSentOn time.Time + Code string + RemoteAddr string + + Created time.Time + Updated time.Time +} + +// --------------------- +// COMMANDS + +type CreateTempUserCommand struct { + Email string + Name string + OrgId int64 + InvitedByUserId int64 + Status TempUserStatus + Code string + Role RoleType + RemoteAddr string + + Result *TempUser +} + +type UpdateTempUserStatusCommand struct { + Code string + Status TempUserStatus +} + +type GetTempUsersQuery struct { + OrgId int64 + Email string + Status TempUserStatus + + Result []*TempUserDTO +} + +type GetTempUserByCodeQuery struct { + Code string + + Result *TempUserDTO +} + +type TempUserDTO struct { + Id int64 `json:"id"` + OrgId int64 `json:"orgId"` + Name string `json:"name"` + Email string `json:"email"` + Role RoleType `json:"role"` + InvitedByLogin string `json:"invitedByLogin"` + InvitedByEmail string `json:"invitedByEmail"` + InvitedByName string `json:"invitedByName"` + Code string `json:"code"` + Status TempUserStatus `json:"status"` + Url string `json:"url"` + EmailSent bool `json:"emailSent"` + EmailSentOn time.Time `json:"emailSentOn"` + Created time.Time `json:"createdOn"` +} diff --git a/pkg/models/user.go 
b/pkg/models/user.go index bf697676b3217..2842bad490da4 100644 --- a/pkg/models/user.go +++ b/pkg/models/user.go @@ -44,14 +44,17 @@ func (u *User) NameOrFallback() string { // COMMANDS type CreateUserCommand struct { - Email string `json:"email" binding:"Required"` - Login string `json:"login"` - Name string `json:"name"` - Company string `json:"compay"` - Password string `json:"password" binding:"Required"` - IsAdmin bool `json:"-"` + Email string + Login string + Name string + Company string + OrgName string + Password string + EmailVerified bool + IsAdmin bool + SkipOrgSetup bool - Result User `json:"-"` + Result User } type UpdateUserCommand struct { @@ -154,3 +157,8 @@ type UserSearchHitDTO struct { Email string `json:"email"` IsAdmin bool `json:"isAdmin"` } + +type UserIdDTO struct { + Id int64 `json:"id"` + Message string `json:"message"` +} diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go index 2f7e5264e537b..665cf6a36ca47 100644 --- a/pkg/plugins/plugins.go +++ b/pkg/plugins/plugins.go @@ -91,7 +91,6 @@ func (scanner *PluginScanner) loadPluginJson(path string) error { if !exists { return errors.New("Did not find type property in plugin.json") } - DataSources[datasourceType.(string)] = pluginJson } diff --git a/pkg/services/notifications/notifications.go b/pkg/services/notifications/notifications.go index 401cd812d5ea4..63ce7219618aa 100644 --- a/pkg/services/notifications/notifications.go +++ b/pkg/services/notifications/notifications.go @@ -3,7 +3,9 @@ package notifications import ( "bytes" "errors" + "fmt" "html/template" + "net/url" "path/filepath" "github.com/grafana/grafana/pkg/bus" @@ -16,6 +18,7 @@ import ( var mailTemplates *template.Template var tmplResetPassword = "reset_password.html" +var tmplSignUpStarted = "signup_started.html" var tmplWelcomeOnSignUp = "welcome_on_signup.html" func Init() error { @@ -25,7 +28,8 @@ func Init() error { bus.AddHandler("email", validateResetPasswordCode) bus.AddHandler("email", sendEmailCommandHandler) - bus.AddEventListener(userSignedUpHandler) + bus.AddEventListener(signUpStartedHandler) + bus.AddEventListener(signUpCompletedHandler) mailTemplates = template.New("name") mailTemplates.Funcs(template.FuncMap{ @@ -60,16 +64,28 @@ func sendEmailCommandHandler(cmd *m.SendEmailCommand) error { } var buffer bytes.Buffer + var err error + var subjectText interface{} + data := cmd.Data if data == nil { data = make(map[string]interface{}, 10) } setDefaultTemplateData(data, nil) - mailTemplates.ExecuteTemplate(&buffer, cmd.Template, data) + err = mailTemplates.ExecuteTemplate(&buffer, cmd.Template, data) + if err != nil { + return err + } + + subjectData := data["Subject"].(map[string]interface{}) + subjectText, hasSubject := subjectData["value"] - subjectTmplText := data["Subject"].(map[string]interface{})["value"].(string) - subjectTmpl, err := template.New("subject").Parse(subjectTmplText) + if !hasSubject { + return errors.New(fmt.Sprintf("Missing subject in Template %s", cmd.Template)) + } + + subjectTmpl, err := template.New("subject").Parse(subjectText.(string)) if err != nil { return err } @@ -120,9 +136,29 @@ func validateResetPasswordCode(query *m.ValidateResetPasswordCodeQuery) error { return nil } -func userSignedUpHandler(evt *events.UserSignedUp) error { - log.Info("User signed up: %s, send_option: %s", evt.Email, setting.Smtp.SendWelcomeEmailOnSignUp) +func signUpStartedHandler(evt *events.SignUpStarted) error { + if !setting.VerifyEmailEnabled { + return nil + } + + log.Info("User signup started: %s", evt.Email) + + 
if evt.Email == "" { + return nil + } + + return sendEmailCommandHandler(&m.SendEmailCommand{ + To: []string{evt.Email}, + Template: tmplSignUpStarted, + Data: map[string]interface{}{ + "Email": evt.Email, + "Code": evt.Code, + "SignUpUrl": setting.ToAbsUrl(fmt.Sprintf("signup/?email=%s&code=%s", url.QueryEscape(evt.Email), url.QueryEscape(evt.Code))), + }, + }) +} +func signUpCompletedHandler(evt *events.SignUpCompleted) error { if evt.Email == "" || !setting.Smtp.SendWelcomeEmailOnSignUp { return nil } @@ -131,7 +167,7 @@ func userSignedUpHandler(evt *events.UserSignedUp) error { To: []string{evt.Email}, Template: tmplWelcomeOnSignUp, Data: map[string]interface{}{ - "Name": evt.Login, + "Name": evt.Name, }, }) } diff --git a/pkg/services/sqlstore/dashboard.go b/pkg/services/sqlstore/dashboard.go index 7fdaace316e36..bbec541589c5d 100644 --- a/pkg/services/sqlstore/dashboard.go +++ b/pkg/services/sqlstore/dashboard.go @@ -146,7 +146,7 @@ func SearchDashboards(query *search.FindPersistedDashboardsQuery) error { } if len(query.Title) > 0 { - sql.WriteString(" AND dashboard.title LIKE ?") + sql.WriteString(" AND dashboard.title " + dialect.LikeStr() + " ?") params = append(params, "%"+query.Title+"%") } @@ -198,11 +198,28 @@ func GetDashboardTags(query *m.GetDashboardTagsQuery) error { } func DeleteDashboard(cmd *m.DeleteDashboardCommand) error { - sess := x.NewSession() - defer sess.Close() + return inTransaction2(func(sess *session) error { + dashboard := m.Dashboard{Slug: cmd.Slug, OrgId: cmd.OrgId} + has, err := x.Get(&dashboard) + if err != nil { + return err + } else if has == false { + return m.ErrDashboardNotFound + } - rawSql := "DELETE FROM dashboard WHERE org_id=? and slug=?" - _, err := sess.Exec(rawSql, cmd.OrgId, cmd.Slug) + deletes := []string{ + "DELETE FROM dashboard_tag WHERE dashboard_id = ? ", + "DELETE FROM star WHERE dashboard_id = ? ", + "DELETE FROM dashboard WHERE id = ?", + } - return err + for _, sql := range deletes { + _, err := sess.Exec(sql, dashboard.Id) + if err != nil { + return err + } + } + + return nil + }) } diff --git a/pkg/services/sqlstore/migrations/migrations.go b/pkg/services/sqlstore/migrations/migrations.go index 329c6187c9ddd..8f7054d395906 100644 --- a/pkg/services/sqlstore/migrations/migrations.go +++ b/pkg/services/sqlstore/migrations/migrations.go @@ -10,12 +10,14 @@ import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator" func AddMigrations(mg *Migrator) { addMigrationLogMigrations(mg) addUserMigrations(mg) + addTempUserMigrations(mg) addStarMigrations(mg) addOrgMigrations(mg) addDashboardMigration(mg) addDataSourceMigration(mg) addApiKeyMigrations(mg) addDashboardSnapshotMigrations(mg) + addQuotaMigration(mg) } func addMigrationLogMigrations(mg *Migrator) { diff --git a/pkg/services/sqlstore/migrations/quota_mig.go b/pkg/services/sqlstore/migrations/quota_mig.go new file mode 100644 index 0000000000000..d877bb4c3c1d1 --- /dev/null +++ b/pkg/services/sqlstore/migrations/quota_mig.go @@ -0,0 +1,28 @@ +package migrations + +import ( + . 
"github.com/grafana/grafana/pkg/services/sqlstore/migrator" +) + +func addQuotaMigration(mg *Migrator) { + + var quotaV1 = Table{ + Name: "quota", + Columns: []*Column{ + {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, + {Name: "org_id", Type: DB_BigInt, Nullable: true}, + {Name: "user_id", Type: DB_BigInt, Nullable: true}, + {Name: "target", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "limit", Type: DB_BigInt, Nullable: false}, + {Name: "created", Type: DB_DateTime, Nullable: false}, + {Name: "updated", Type: DB_DateTime, Nullable: false}, + }, + Indices: []*Index{ + {Cols: []string{"org_id", "user_id", "target"}, Type: UniqueIndex}, + }, + } + mg.AddMigration("create quota table v1", NewAddTableMigration(quotaV1)) + + //------- indexes ------------------ + addTableIndicesMigrations(mg, "v1", quotaV1) +} diff --git a/pkg/services/sqlstore/migrations/temp_user.go b/pkg/services/sqlstore/migrations/temp_user.go new file mode 100644 index 0000000000000..3cae5d82472cf --- /dev/null +++ b/pkg/services/sqlstore/migrations/temp_user.go @@ -0,0 +1,38 @@ +package migrations + +import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator" + +func addTempUserMigrations(mg *Migrator) { + tempUserV1 := Table{ + Name: "temp_user", + Columns: []*Column{ + {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, + {Name: "org_id", Type: DB_BigInt, Nullable: false}, + {Name: "version", Type: DB_Int, Nullable: false}, + {Name: "email", Type: DB_NVarchar, Length: 255}, + {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: true}, + {Name: "role", Type: DB_NVarchar, Length: 20, Nullable: true}, + {Name: "code", Type: DB_NVarchar, Length: 255}, + {Name: "status", Type: DB_Varchar, Length: 20}, + {Name: "invited_by_user_id", Type: DB_BigInt, Nullable: true}, + {Name: "email_sent", Type: DB_Bool}, + {Name: "email_sent_on", Type: DB_DateTime, Nullable: true}, + {Name: "remote_addr", Type: DB_Varchar, Length: 255, Nullable: true}, + {Name: "created", Type: DB_DateTime}, + {Name: "updated", Type: DB_DateTime}, + }, + Indices: []*Index{ + {Cols: []string{"email"}, Type: IndexType}, + {Cols: []string{"org_id"}, Type: IndexType}, + {Cols: []string{"code"}, Type: IndexType}, + {Cols: []string{"status"}, Type: IndexType}, + }, + } + + // addDropAllIndicesMigrations(mg, "v7", tempUserV1) + // mg.AddMigration("Drop old table tempUser v7", NewDropTableMigration("temp_user")) + + // create table + mg.AddMigration("create temp user table v1-7", NewAddTableMigration(tempUserV1)) + addTableIndicesMigrations(mg, "v1-7", tempUserV1) +} diff --git a/pkg/services/sqlstore/migrator/dialect.go b/pkg/services/sqlstore/migrator/dialect.go index 9e3c3a11e16a2..f2fffbb16b3ad 100644 --- a/pkg/services/sqlstore/migrator/dialect.go +++ b/pkg/services/sqlstore/migrator/dialect.go @@ -16,6 +16,7 @@ type Dialect interface { ShowCreateNull() bool SqlType(col *Column) string SupportEngine() bool + LikeStr() string CreateIndexSql(tableName string, index *Index) string CreateTableSql(table *Table) string @@ -58,6 +59,10 @@ func (b *BaseDialect) AndStr() string { return "AND" } +func (b *BaseDialect) LikeStr() string { + return "LIKE" +} + func (b *BaseDialect) OrStr() string { return "OR" } diff --git a/pkg/services/sqlstore/migrator/migrator.go b/pkg/services/sqlstore/migrator/migrator.go index 2461d9dab0cd5..48000e34ca26b 100644 --- a/pkg/services/sqlstore/migrator/migrator.go +++ b/pkg/services/sqlstore/migrator/migrator.go @@ -100,6 +100,7 @@ func (mg *Migrator) Start() 
error { } if err := mg.exec(m); err != nil { + log.Error(3, "Migrator: error: \n%s:\n%s", err, sql) record.Error = err.Error() mg.x.Insert(&record) return err diff --git a/pkg/services/sqlstore/migrator/postgres_dialect.go b/pkg/services/sqlstore/migrator/postgres_dialect.go index 64c6772c2d151..3ab13f49b4934 100644 --- a/pkg/services/sqlstore/migrator/postgres_dialect.go +++ b/pkg/services/sqlstore/migrator/postgres_dialect.go @@ -28,6 +28,10 @@ func (db *Postgres) QuoteStr() string { return "\"" } +func (b *Postgres) LikeStr() string { + return "ILIKE" +} + func (db *Postgres) AutoIncrStr() string { return "" } diff --git a/pkg/services/sqlstore/org.go b/pkg/services/sqlstore/org.go index 725a21d7fad53..6ac171127c940 100644 --- a/pkg/services/sqlstore/org.go +++ b/pkg/services/sqlstore/org.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/events" - "github.com/grafana/grafana/pkg/log" m "github.com/grafana/grafana/pkg/models" ) @@ -13,6 +12,7 @@ func init() { bus.AddHandler("sql", GetOrgById) bus.AddHandler("sql", CreateOrg) bus.AddHandler("sql", UpdateOrg) + bus.AddHandler("sql", UpdateOrgAddress) bus.AddHandler("sql", GetOrgByName) bus.AddHandler("sql", SearchOrgs) bus.AddHandler("sql", DeleteOrg) @@ -63,9 +63,31 @@ func GetOrgByName(query *m.GetOrgByNameQuery) error { return nil } +func isOrgNameTaken(name string, existingId int64, sess *session) (bool, error) { + // check if org name is taken + var org m.Org + exists, err := sess.Where("name=?", name).Get(&org) + + if err != nil { + return false, nil + } + + if exists && existingId != org.Id { + return true, nil + } + + return false, nil +} + func CreateOrg(cmd *m.CreateOrgCommand) error { return inTransaction2(func(sess *session) error { + if isNameTaken, err := isOrgNameTaken(cmd.Name, 0, sess); err != nil { + return err + } else if isNameTaken { + return m.ErrOrgNameTaken + } + org := m.Org{ Name: cmd.Name, Created: time.Now(), @@ -100,6 +122,12 @@ func CreateOrg(cmd *m.CreateOrgCommand) error { func UpdateOrg(cmd *m.UpdateOrgCommand) error { return inTransaction2(func(sess *session) error { + if isNameTaken, err := isOrgNameTaken(cmd.Name, cmd.OrgId, sess); err != nil { + return err + } else if isNameTaken { + return m.ErrOrgNameTaken + } + org := m.Org{ Name: cmd.Name, Updated: time.Now(), @@ -119,21 +147,48 @@ func UpdateOrg(cmd *m.UpdateOrgCommand) error { }) } +func UpdateOrgAddress(cmd *m.UpdateOrgAddressCommand) error { + return inTransaction2(func(sess *session) error { + org := m.Org{ + Address1: cmd.Address1, + Address2: cmd.Address2, + City: cmd.City, + ZipCode: cmd.ZipCode, + State: cmd.State, + Country: cmd.Country, + + Updated: time.Now(), + } + + if _, err := sess.Id(cmd.OrgId).Update(&org); err != nil { + return err + } + + sess.publishAfterCommit(&events.OrgUpdated{ + Timestamp: org.Updated, + Id: org.Id, + Name: org.Name, + }) + + return nil + }) +} + func DeleteOrg(cmd *m.DeleteOrgCommand) error { return inTransaction2(func(sess *session) error { deletes := []string{ - "DELETE FROM star WHERE EXISTS (SELECT 1 FROM dashboard WHERE org_id = ?)", - "DELETE FROM dashboard_tag WHERE EXISTS (SELECT 1 FROM dashboard WHERE org_id = ?)", + "DELETE FROM star WHERE EXISTS (SELECT 1 FROM dashboard WHERE org_id = ? AND star.dashboard_id = dashboard.id)", + "DELETE FROM dashboard_tag WHERE EXISTS (SELECT 1 FROM dashboard WHERE org_id = ? 
AND dashboard_tag.dashboard_id = dashboard.id)", "DELETE FROM dashboard WHERE org_id = ?", "DELETE FROM api_key WHERE org_id = ?", "DELETE FROM data_source WHERE org_id = ?", "DELETE FROM org_user WHERE org_id = ?", "DELETE FROM org WHERE id = ?", + "DELETE FROM temp_user WHERE org_id = ?", } for _, sql := range deletes { - log.Trace(sql) _, err := sess.Exec(sql, cmd.Id) if err != nil { return err diff --git a/pkg/services/sqlstore/org_users.go b/pkg/services/sqlstore/org_users.go index 2e8fc40cb7c8d..fdd671d0bfe21 100644 --- a/pkg/services/sqlstore/org_users.go +++ b/pkg/services/sqlstore/org_users.go @@ -19,6 +19,12 @@ func init() { func AddOrgUser(cmd *m.AddOrgUserCommand) error { return inTransaction(func(sess *xorm.Session) error { + // check if user exists + if res, err := sess.Query("SELECT 1 from org_user WHERE org_id=? and user_id=?", cmd.OrgId, cmd.UserId); err != nil { + return err + } else if len(res) == 1 { + return m.ErrOrgUserAlreadyAdded + } entity := m.OrgUser{ OrgId: cmd.OrgId, diff --git a/pkg/services/sqlstore/quota.go b/pkg/services/sqlstore/quota.go new file mode 100644 index 0000000000000..53ea8889c561a --- /dev/null +++ b/pkg/services/sqlstore/quota.go @@ -0,0 +1,239 @@ +package sqlstore + +import ( + "fmt" + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" +) + +func init() { + bus.AddHandler("sql", GetOrgQuotaByTarget) + bus.AddHandler("sql", GetOrgQuotas) + bus.AddHandler("sql", UpdateOrgQuota) + bus.AddHandler("sql", GetUserQuotaByTarget) + bus.AddHandler("sql", GetUserQuotas) + bus.AddHandler("sql", UpdateUserQuota) + bus.AddHandler("sql", GetGlobalQuotaByTarget) +} + +type targetCount struct { + Count int64 +} + +func GetOrgQuotaByTarget(query *m.GetOrgQuotaByTargetQuery) error { + quota := m.Quota{ + Target: query.Target, + OrgId: query.OrgId, + } + has, err := x.Get(&quota) + if err != nil { + return err + } else if has == false { + quota.Limit = query.Default + } + + //get quota used. + rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where org_id=?", dialect.Quote(query.Target)) + resp := make([]*targetCount, 0) + if err := x.Sql(rawSql, query.OrgId).Find(&resp); err != nil { + return err + } + + query.Result = &m.OrgQuotaDTO{ + Target: query.Target, + Limit: quota.Limit, + OrgId: query.OrgId, + Used: resp[0].Count, + } + + return nil +} + +func GetOrgQuotas(query *m.GetOrgQuotasQuery) error { + quotas := make([]*m.Quota, 0) + sess := x.Table("quota") + if err := sess.Where("org_id=? AND user_id=0", query.OrgId).Find(&quotas); err != nil { + return err + } + + defaultQuotas := setting.Quota.Org.ToMap() + + seenTargets := make(map[string]bool) + for _, q := range quotas { + seenTargets[q.Target] = true + } + + for t, v := range defaultQuotas { + if _, ok := seenTargets[t]; !ok { + quotas = append(quotas, &m.Quota{ + OrgId: query.OrgId, + Target: t, + Limit: v, + }) + } + } + + result := make([]*m.OrgQuotaDTO, len(quotas)) + for i, q := range quotas { + //get quota used. 
+ rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where org_id=?", dialect.Quote(q.Target)) + resp := make([]*targetCount, 0) + if err := x.Sql(rawSql, q.OrgId).Find(&resp); err != nil { + return err + } + result[i] = &m.OrgQuotaDTO{ + Target: q.Target, + Limit: q.Limit, + OrgId: q.OrgId, + Used: resp[0].Count, + } + } + query.Result = result + return nil +} + +func UpdateOrgQuota(cmd *m.UpdateOrgQuotaCmd) error { + return inTransaction2(func(sess *session) error { + //Check if quota is already defined in the DB + quota := m.Quota{ + Target: cmd.Target, + OrgId: cmd.OrgId, + } + has, err := sess.Get(&quota) + if err != nil { + return err + } + quota.Limit = cmd.Limit + if has == false { + //No quota in the DB for this target, so create a new one. + if _, err := sess.Insert(&quota); err != nil { + return err + } + } else { + //update existing quota entry in the DB. + if _, err := sess.Id(quota.Id).Update(&quota); err != nil { + return err + } + } + + return nil + }) +} + +func GetUserQuotaByTarget(query *m.GetUserQuotaByTargetQuery) error { + quota := m.Quota{ + Target: query.Target, + UserId: query.UserId, + } + has, err := x.Get(&quota) + if err != nil { + return err + } else if has == false { + quota.Limit = query.Default + } + + //get quota used. + rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where user_id=?", dialect.Quote(query.Target)) + resp := make([]*targetCount, 0) + if err := x.Sql(rawSql, query.UserId).Find(&resp); err != nil { + return err + } + + query.Result = &m.UserQuotaDTO{ + Target: query.Target, + Limit: quota.Limit, + UserId: query.UserId, + Used: resp[0].Count, + } + + return nil +} + +func GetUserQuotas(query *m.GetUserQuotasQuery) error { + quotas := make([]*m.Quota, 0) + sess := x.Table("quota") + if err := sess.Where("user_id=? AND org_id=0", query.UserId).Find(&quotas); err != nil { + return err + } + + defaultQuotas := setting.Quota.User.ToMap() + + seenTargets := make(map[string]bool) + for _, q := range quotas { + seenTargets[q.Target] = true + } + + for t, v := range defaultQuotas { + if _, ok := seenTargets[t]; !ok { + quotas = append(quotas, &m.Quota{ + UserId: query.UserId, + Target: t, + Limit: v, + }) + } + } + + result := make([]*m.UserQuotaDTO, len(quotas)) + for i, q := range quotas { + //get quota used. + rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where user_id=?", dialect.Quote(q.Target)) + resp := make([]*targetCount, 0) + if err := x.Sql(rawSql, q.UserId).Find(&resp); err != nil { + return err + } + result[i] = &m.UserQuotaDTO{ + Target: q.Target, + Limit: q.Limit, + UserId: q.UserId, + Used: resp[0].Count, + } + } + query.Result = result + return nil +} + +func UpdateUserQuota(cmd *m.UpdateUserQuotaCmd) error { + return inTransaction2(func(sess *session) error { + //Check if quota is already defined in the DB + quota := m.Quota{ + Target: cmd.Target, + UserId: cmd.UserId, + } + has, err := sess.Get(&quota) + if err != nil { + return err + } + quota.Limit = cmd.Limit + if has == false { + //No quota in the DB for this target, so create a new one. + if _, err := sess.Insert(&quota); err != nil { + return err + } + } else { + //update existing quota entry in the DB. + if _, err := sess.Id(quota.Id).Update(&quota); err != nil { + return err + } + } + + return nil + }) +} + +func GetGlobalQuotaByTarget(query *m.GetGlobalQuotaByTargetQuery) error { + //get quota used. 
+ rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s", dialect.Quote(query.Target)) + resp := make([]*targetCount, 0) + if err := x.Sql(rawSql).Find(&resp); err != nil { + return err + } + + query.Result = &m.GlobalQuotaDTO{ + Target: query.Target, + Limit: query.Default, + Used: resp[0].Count, + } + + return nil +} diff --git a/pkg/services/sqlstore/quota_test.go b/pkg/services/sqlstore/quota_test.go new file mode 100644 index 0000000000000..5ef618e166d72 --- /dev/null +++ b/pkg/services/sqlstore/quota_test.go @@ -0,0 +1,171 @@ +package sqlstore + +import ( + "testing" + + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" + . "github.com/smartystreets/goconvey/convey" +) + +func TestQuotaCommandsAndQueries(t *testing.T) { + + Convey("Testing Qutoa commands & queries", t, func() { + InitTestDB(t) + userId := int64(1) + orgId := int64(0) + + setting.Quota = setting.QuotaSettings{ + Enabled: true, + Org: &setting.OrgQuota{ + User: 5, + Dashboard: 5, + DataSource: 5, + ApiKey: 5, + }, + User: &setting.UserQuota{ + Org: 5, + }, + Global: &setting.GlobalQuota{ + Org: 5, + User: 5, + Dashboard: 5, + DataSource: 5, + ApiKey: 5, + Session: 5, + }, + } + + // create a new org and add user_id 1 as admin. + // we will then have an org with 1 user. and a user + // with 1 org. + userCmd := m.CreateOrgCommand{ + Name: "TestOrg", + UserId: 1, + } + err := CreateOrg(&userCmd) + So(err, ShouldBeNil) + orgId = userCmd.Result.Id + + Convey("Given saved org quota for users", func() { + orgCmd := m.UpdateOrgQuotaCmd{ + OrgId: orgId, + Target: "org_user", + Limit: 10, + } + err := UpdateOrgQuota(&orgCmd) + So(err, ShouldBeNil) + + Convey("Should be able to get saved quota by org id and target", func() { + query := m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 1} + err = GetOrgQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Limit, ShouldEqual, 10) + }) + Convey("Should be able to get default quota by org id and target", func() { + query := m.GetOrgQuotaByTargetQuery{OrgId: 123, Target: "org_user", Default: 11} + err = GetOrgQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Limit, ShouldEqual, 11) + }) + Convey("Should be able to get used org quota when rows exist", func() { + query := m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 11} + err = GetOrgQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Used, ShouldEqual, 1) + }) + Convey("Should be able to get used org quota when no rows exist", func() { + query := m.GetOrgQuotaByTargetQuery{OrgId: 2, Target: "org_user", Default: 11} + err = GetOrgQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Used, ShouldEqual, 0) + }) + Convey("Should be able to quota list for org", func() { + query := m.GetOrgQuotasQuery{OrgId: orgId} + err = GetOrgQuotas(&query) + + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 4) + for _, res := range query.Result { + limit := 5 //default quota limit + used := 0 + if res.Target == "org_user" { + limit = 10 //customized quota limit. 
+ used = 1 + } + So(res.Limit, ShouldEqual, limit) + So(res.Used, ShouldEqual, used) + + } + }) + }) + Convey("Given saved user quota for org", func() { + userQoutaCmd := m.UpdateUserQuotaCmd{ + UserId: userId, + Target: "org_user", + Limit: 10, + } + err := UpdateUserQuota(&userQoutaCmd) + So(err, ShouldBeNil) + + Convey("Should be able to get saved quota by user id and target", func() { + query := m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 1} + err = GetUserQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Limit, ShouldEqual, 10) + }) + Convey("Should be able to get default quota by user id and target", func() { + query := m.GetUserQuotaByTargetQuery{UserId: 9, Target: "org_user", Default: 11} + err = GetUserQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Limit, ShouldEqual, 11) + }) + Convey("Should be able to get used user quota when rows exist", func() { + query := m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 11} + err = GetUserQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Used, ShouldEqual, 1) + }) + Convey("Should be able to get used user quota when no rows exist", func() { + query := m.GetUserQuotaByTargetQuery{UserId: 2, Target: "org_user", Default: 11} + err = GetUserQuotaByTarget(&query) + + So(err, ShouldBeNil) + So(query.Result.Used, ShouldEqual, 0) + }) + Convey("Should be able to quota list for user", func() { + query := m.GetUserQuotasQuery{UserId: userId} + err = GetUserQuotas(&query) + + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 1) + So(query.Result[0].Limit, ShouldEqual, 10) + So(query.Result[0].Used, ShouldEqual, 1) + }) + }) + + Convey("Should be able to global user quota", func() { + query := m.GetGlobalQuotaByTargetQuery{Target: "user", Default: 5} + err = GetGlobalQuotaByTarget(&query) + So(err, ShouldBeNil) + + So(query.Result.Limit, ShouldEqual, 5) + So(query.Result.Used, ShouldEqual, 0) + }) + Convey("Should be able to global org quota", func() { + query := m.GetGlobalQuotaByTargetQuery{Target: "org", Default: 5} + err = GetGlobalQuotaByTarget(&query) + So(err, ShouldBeNil) + + So(query.Result.Limit, ShouldEqual, 5) + So(query.Result.Used, ShouldEqual, 1) + }) + }) +} diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index 2dd483651c06f..88c35d630f8ad 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ b/pkg/services/sqlstore/sqlstore.go @@ -136,7 +136,7 @@ func getEngine() (*xorm.Engine, error) { return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type) } - log.Info("Database: %v, ConnectionString: %v", DbCfg.Type, cnnstr) + log.Info("Database: %v", DbCfg.Type) return xorm.NewEngine(DbCfg.Type, cnnstr) } diff --git a/pkg/services/sqlstore/sqlstore.goconvey b/pkg/services/sqlstore/sqlstore.goconvey index 92feb5268a54b..8ca30789e4a41 100644 --- a/pkg/services/sqlstore/sqlstore.goconvey +++ b/pkg/services/sqlstore/sqlstore.goconvey @@ -1 +1 @@ --timeout=10s +-timeout=20s diff --git a/pkg/services/sqlstore/stats.go b/pkg/services/sqlstore/stats.go index 7995dd43f3817..044aa185f19ee 100644 --- a/pkg/services/sqlstore/stats.go +++ b/pkg/services/sqlstore/stats.go @@ -7,6 +7,18 @@ import ( func init() { bus.AddHandler("sql", GetSystemStats) + bus.AddHandler("sql", GetDataSourceStats) +} + +func GetDataSourceStats(query *m.GetDataSourceStatsQuery) error { + var rawSql = `SELECT COUNT(*) as count, type FROM data_source GROUP BY type` + query.Result = make([]*m.DataSourceStats, 0) + err := 
x.Sql(rawSql).Find(&query.Result) + if err != nil { + return err + } + + return err } func GetSystemStats(query *m.GetSystemStatsQuery) error { diff --git a/pkg/services/sqlstore/temp_user.go b/pkg/services/sqlstore/temp_user.go new file mode 100644 index 0000000000000..0fe5c9612f5af --- /dev/null +++ b/pkg/services/sqlstore/temp_user.go @@ -0,0 +1,121 @@ +package sqlstore + +import ( + "time" + + "github.com/go-xorm/xorm" + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" +) + +func init() { + bus.AddHandler("sql", CreateTempUser) + bus.AddHandler("sql", GetTempUsersQuery) + bus.AddHandler("sql", UpdateTempUserStatus) + bus.AddHandler("sql", GetTempUserByCode) +} + +func UpdateTempUserStatus(cmd *m.UpdateTempUserStatusCommand) error { + return inTransaction(func(sess *xorm.Session) error { + var rawSql = "UPDATE temp_user SET status=? WHERE code=?" + _, err := sess.Exec(rawSql, string(cmd.Status), cmd.Code) + return err + }) +} + +func CreateTempUser(cmd *m.CreateTempUserCommand) error { + return inTransaction2(func(sess *session) error { + + // create user + user := &m.TempUser{ + Email: cmd.Email, + Name: cmd.Name, + OrgId: cmd.OrgId, + Code: cmd.Code, + Role: cmd.Role, + Status: cmd.Status, + RemoteAddr: cmd.RemoteAddr, + InvitedByUserId: cmd.InvitedByUserId, + Created: time.Now(), + Updated: time.Now(), + } + + if _, err := sess.Insert(user); err != nil { + return err + } + + cmd.Result = user + return nil + }) +} + +func GetTempUsersQuery(query *m.GetTempUsersQuery) error { + rawSql := `SELECT + tu.id as id, + tu.org_id as org_id, + tu.email as email, + tu.name as name, + tu.role as role, + tu.code as code, + tu.status as status, + tu.email_sent as email_sent, + tu.email_sent_on as email_sent_on, + tu.created as created, + u.login as invited_by_login, + u.name as invited_by_name, + u.email as invited_by_email + FROM ` + dialect.Quote("temp_user") + ` as tu + LEFT OUTER JOIN ` + dialect.Quote("user") + ` as u on u.id = tu.invited_by_user_id + WHERE tu.status=?` + params := []interface{}{string(query.Status)} + + if query.OrgId > 0 { + rawSql += ` AND tu.org_id=?` + params = append(params, query.OrgId) + } + + if query.Email != "" { + rawSql += ` AND tu.email=?` + params = append(params, query.Email) + } + + rawSql += " ORDER BY tu.created desc" + + query.Result = make([]*m.TempUserDTO, 0) + sess := x.Sql(rawSql, params...) + err := sess.Find(&query.Result) + return err +} + +func GetTempUserByCode(query *m.GetTempUserByCodeQuery) error { + var rawSql = `SELECT + tu.id as id, + tu.org_id as org_id, + tu.email as email, + tu.name as name, + tu.role as role, + tu.code as code, + tu.status as status, + tu.email_sent as email_sent, + tu.email_sent_on as email_sent_on, + tu.created as created, + u.login as invited_by_login, + u.name as invited_by_name, + u.email as invited_by_email + FROM ` + dialect.Quote("temp_user") + ` as tu + LEFT OUTER JOIN ` + dialect.Quote("user") + ` as u on u.id = tu.invited_by_user_id + WHERE tu.code=?` + + var tempUser m.TempUserDTO + sess := x.Sql(rawSql, query.Code) + has, err := sess.Get(&tempUser) + + if err != nil { + return err + } else if has == false { + return m.ErrTempUserNotFound + } + + query.Result = &tempUser + return err +} diff --git a/pkg/services/sqlstore/temp_user_test.go b/pkg/services/sqlstore/temp_user_test.go new file mode 100644 index 0000000000000..ebf753890f660 --- /dev/null +++ b/pkg/services/sqlstore/temp_user_test.go @@ -0,0 +1,59 @@ +package sqlstore + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + + m "github.com/grafana/grafana/pkg/models" +) + +func TestTempUserCommandsAndQueries(t *testing.T) { + + Convey("Testing Temp User commands & queries", t, func() { + InitTestDB(t) + + Convey("Given saved api key", func() { + cmd := m.CreateTempUserCommand{ + OrgId: 2256, + Name: "hello", + Code: "asd", + Email: "e@as.co", + Status: m.TmpUserInvitePending, + } + err := CreateTempUser(&cmd) + So(err, ShouldBeNil) + + Convey("Should be able to get temp users by org id", func() { + query := m.GetTempUsersQuery{OrgId: 2256, Status: m.TmpUserInvitePending} + err = GetTempUsersQuery(&query) + + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 1) + }) + + Convey("Should be able to get temp users by email", func() { + query := m.GetTempUsersQuery{Email: "e@as.co", Status: m.TmpUserInvitePending} + err = GetTempUsersQuery(&query) + + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 1) + }) + + Convey("Should be able to get temp users by code", func() { + query := m.GetTempUserByCodeQuery{Code: "asd"} + err = GetTempUserByCode(&query) + + So(err, ShouldBeNil) + So(query.Result.Name, ShouldEqual, "hello") + }) + + Convey("Should be able update status", func() { + cmd2 := m.UpdateTempUserStatusCommand{Code: "asd", Status: m.TmpUserRevoked} + err := UpdateTempUserStatus(&cmd2) + So(err, ShouldBeNil) + }) + + }) + }) +} diff --git a/pkg/services/sqlstore/user.go b/pkg/services/sqlstore/user.go index f5df6f9ff1f00..96b8c24b8fcb0 100644 --- a/pkg/services/sqlstore/user.go +++ b/pkg/services/sqlstore/user.go @@ -1,7 +1,6 @@ package sqlstore import ( - "fmt" "strings" "time" @@ -30,7 +29,11 @@ func init() { bus.AddHandler("sql", UpdateUserPermissions) } -func getOrgIdForNewUser(userEmail string, sess *session) (int64, error) { +func getOrgIdForNewUser(cmd *m.CreateUserCommand, sess *session) (int64, error) { + if cmd.SkipOrgSetup { + return -1, nil + } + var org m.Org if setting.AutoAssignOrg { @@ -46,7 +49,10 @@ func getOrgIdForNewUser(userEmail string, sess *session) (int64, error) { org.Id = 1 } } else { - org.Name = userEmail + org.Name = cmd.OrgName + if len(org.Name) == 0 { + org.Name = util.StringsFallback2(cmd.Email, cmd.Login) + } } org.Created = time.Now() @@ -67,21 +73,26 @@ func getOrgIdForNewUser(userEmail string, sess *session) (int64, error) { func CreateUser(cmd *m.CreateUserCommand) error { return inTransaction2(func(sess *session) error { - orgId, err := getOrgIdForNewUser(cmd.Email, sess) + orgId, err := getOrgIdForNewUser(cmd, sess) if err != nil { return err } + if cmd.Email == "" { + cmd.Email = cmd.Login + } + // create user user := m.User{ - Email: cmd.Email, - Name: cmd.Name, - Login: cmd.Login, - Company: cmd.Company, - IsAdmin: cmd.IsAdmin, - OrgId: orgId, - Created: time.Now(), - Updated: time.Now(), + Email: cmd.Email, + Name: cmd.Name, + Login: cmd.Login, + Company: cmd.Company, + IsAdmin: cmd.IsAdmin, + OrgId: orgId, + EmailVerified: cmd.EmailVerified, + Created: time.Now(), + Updated: time.Now(), } if len(cmd.Password) > 0 { @@ -96,23 +107,6 @@ func CreateUser(cmd *m.CreateUserCommand) error { return err } - // create org user link - orgUser := m.OrgUser{ - OrgId: orgId, - UserId: user.Id, - Role: m.ROLE_ADMIN, - Created: time.Now(), - Updated: time.Now(), - } - - if setting.AutoAssignOrg && !user.IsAdmin { - orgUser.Role = m.RoleType(setting.AutoAssignOrgRole) - } - - if _, err = sess.Insert(&orgUser); err != nil { - return err - } - sess.publishAfterCommit(&events.UserCreated{ Timestamp: user.Created, Id: 
user.Id, @@ -122,6 +116,26 @@ func CreateUser(cmd *m.CreateUserCommand) error { }) cmd.Result = user + + // create org user link + if !cmd.SkipOrgSetup { + orgUser := m.OrgUser{ + OrgId: orgId, + UserId: user.Id, + Role: m.ROLE_ADMIN, + Created: time.Now(), + Updated: time.Now(), + } + + if setting.AutoAssignOrg && !user.IsAdmin { + orgUser.Role = m.RoleType(setting.AutoAssignOrgRole) + } + + if _, err = sess.Insert(&orgUser); err != nil { + return err + } + } + return nil }) } @@ -305,9 +319,19 @@ func SearchUsers(query *m.SearchUsersQuery) error { func DeleteUser(cmd *m.DeleteUserCommand) error { return inTransaction(func(sess *xorm.Session) error { - var rawSql = fmt.Sprintf("DELETE FROM %s WHERE id=?", x.Dialect().Quote("user")) - _, err := sess.Exec(rawSql, cmd.UserId) - return err + deletes := []string{ + "DELETE FROM star WHERE user_id = ?", + "DELETE FROM " + dialect.Quote("user") + " WHERE id = ?", + } + + for _, sql := range deletes { + _, err := sess.Exec(sql, cmd.UserId) + if err != nil { + return err + } + } + + return nil }) } diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index 478e9dcb8c52d..d6a853a9cdc12 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -6,6 +6,7 @@ package setting import ( "bytes" "encoding/json" + "errors" "fmt" "net/url" "os" @@ -73,12 +74,14 @@ var ( CookieRememberName string DisableGravatar bool EmailCodeValidMinutes int + DataProxyWhiteList map[string]bool // User settings AllowUserSignUp bool AllowUserOrgCreate bool AutoAssignOrg bool AutoAssignOrgRole string + VerifyEmailEnabled bool // Http auth AdminUser string @@ -114,8 +117,9 @@ var ( appliedCommandLineProperties []string appliedEnvOverrides []string - ReportingEnabled bool - GoogleAnalyticsId string + ReportingEnabled bool + GoogleAnalyticsId string + GoogleTagManagerId string // LDAP LdapEnabled bool @@ -123,6 +127,9 @@ var ( // SMTP email settings Smtp SmtpSettings + + // QUOTA + Quota QuotaSettings ) type CommandLineArgs struct { @@ -133,7 +140,7 @@ type CommandLineArgs struct { func init() { IsWindows = runtime.GOOS == "windows" - log.NewLogger(0, "console", `{"level": 0}`) + log.NewLogger(0, "console", `{"level": 0, "formatting":true}`) } func parseAppUrlAndSubUrl(section *ini.Section) (string, string) { @@ -273,7 +280,7 @@ func loadSpecifedConfigFile(configFile string) { } defaultKey, err := defaultSec.GetKey(key.Name()) if err != nil { - log.Error(3, "Unknown config key %s defined in section %s, in file", key.Name(), section.Name(), configFile) + log.Error(3, "Unknown config key %s defined in section %s, in file %s", key.Name(), section.Name(), configFile) continue } defaultKey.SetValue(key.Value()) @@ -352,7 +359,26 @@ func setHomePath(args *CommandLineArgs) { } } -func NewConfigContext(args *CommandLineArgs) { +var skipStaticRootValidation bool = false + +func validateStaticRootPath() error { + if skipStaticRootValidation { + return nil + } + + if _, err := os.Stat(path.Join(StaticRootPath, "css")); err == nil { + return nil + } + + if _, err := os.Stat(StaticRootPath + "_gen/css"); err == nil { + StaticRootPath = StaticRootPath + "_gen" + return nil + } + + return errors.New("Failed to detect generated css or javascript files in static root (%s), have you executed default grunt task?") +} + +func NewConfigContext(args *CommandLineArgs) error { setHomePath(args) loadConfiguration(args) @@ -371,11 +397,16 @@ func NewConfigContext(args *CommandLineArgs) { Domain = server.Key("domain").MustString("localhost") HttpAddr = 
server.Key("http_addr").MustString("0.0.0.0") HttpPort = server.Key("http_port").MustString("3000") - StaticRootPath = makeAbsolute(server.Key("static_root_path").String(), HomePath) RouterLogging = server.Key("router_logging").MustBool(false) EnableGzip = server.Key("enable_gzip").MustBool(false) EnforceDomain = server.Key("enforce_domain").MustBool(false) + StaticRootPath = makeAbsolute(server.Key("static_root_path").String(), HomePath) + + if err := validateStaticRootPath(); err != nil { + return err + } + // read security settings security := Cfg.Section("security") SecretKey = security.Key("secret_key").String() LogInRememberDays = security.Key("login_remember_days").MustInt() @@ -383,6 +414,12 @@ func NewConfigContext(args *CommandLineArgs) { CookieRememberName = security.Key("cookie_remember_name").String() DisableGravatar = security.Key("disable_gravatar").MustBool(true) + // read data source proxy white list + DataProxyWhiteList = make(map[string]bool) + for _, hostAndIp := range security.Key("data_source_proxy_whitelist").Strings(" ") { + DataProxyWhiteList[hostAndIp] = true + } + // admin AdminUser = security.Key("admin_user").String() AdminPassword = security.Key("admin_password").String() @@ -391,7 +428,8 @@ func NewConfigContext(args *CommandLineArgs) { AllowUserSignUp = users.Key("allow_sign_up").MustBool(true) AllowUserOrgCreate = users.Key("allow_org_create").MustBool(true) AutoAssignOrg = users.Key("auto_assign_org").MustBool(true) - AutoAssignOrgRole = users.Key("auto_assign_org_role").In("Editor", []string{"Editor", "Admin", "Viewer"}) + AutoAssignOrgRole = users.Key("auto_assign_org_role").In("Editor", []string{"Editor", "Admin", "Read Only Editor", "Viewer"}) + VerifyEmailEnabled = users.Key("verify_email_enabled").MustBool(false) // anonymous access AnonymousEnabled = Cfg.Section("auth.anonymous").Key("enabled").MustBool(false) @@ -415,6 +453,7 @@ func NewConfigContext(args *CommandLineArgs) { analytics := Cfg.Section("analytics") ReportingEnabled = analytics.Key("reporting_enabled").MustBool(true) GoogleAnalyticsId = analytics.Key("google_analytics_ua_id").String() + GoogleTagManagerId = analytics.Key("google_tag_manager_id").String() ldapSec := Cfg.Section("auth.ldap") LdapEnabled = ldapSec.Key("enabled").MustBool(false) @@ -422,6 +461,13 @@ func NewConfigContext(args *CommandLineArgs) { readSessionConfig() readSmtpSettings() + readQuotaSettings() + + if VerifyEmailEnabled && !Smtp.Enabled { + log.Warn("require_email_validation is enabled but smpt is disabled") + } + + return nil } func readSessionConfig() { @@ -456,6 +502,8 @@ var logLevels = map[string]int{ } func initLogging(args *CommandLineArgs) { + //close any existing log handlers. + log.Close() // Get and check log mode. LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), ",") LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath) @@ -479,7 +527,11 @@ func initLogging(args *CommandLineArgs) { // Generate log configuration. 
switch mode { case "console": - LogConfigs[i] = util.DynMap{"level": level} + formatting := sec.Key("formatting").MustBool(true) + LogConfigs[i] = util.DynMap{ + "level": level, + "formatting": formatting, + } case "file": logPath := sec.Key("file_name").MustString(filepath.Join(LogsPath, "grafana.log")) os.MkdirAll(filepath.Dir(logPath), os.ModePerm) @@ -540,7 +592,7 @@ func LogConfigurationInfo() { if len(appliedEnvOverrides) > 0 { text.WriteString("\tEnvironment variables used:\n") - for i, prop := range appliedCommandLineProperties { + for i, prop := range appliedEnvOverrides { text.WriteString(fmt.Sprintf(" [%d]: %s\n", i, prop)) } } diff --git a/pkg/setting/setting_quota.go b/pkg/setting/setting_quota.go new file mode 100644 index 0000000000000..49769d9930fdf --- /dev/null +++ b/pkg/setting/setting_quota.go @@ -0,0 +1,94 @@ +package setting + +import ( + "reflect" +) + +type OrgQuota struct { + User int64 `target:"org_user"` + DataSource int64 `target:"data_source"` + Dashboard int64 `target:"dashboard"` + ApiKey int64 `target:"api_key"` +} + +type UserQuota struct { + Org int64 `target:"org_user"` +} + +type GlobalQuota struct { + Org int64 `target:"org"` + User int64 `target:"user"` + DataSource int64 `target:"data_source"` + Dashboard int64 `target:"dashboard"` + ApiKey int64 `target:"api_key"` + Session int64 `target:"-"` +} + +func (q *OrgQuota) ToMap() map[string]int64 { + return quotaToMap(*q) +} + +func (q *UserQuota) ToMap() map[string]int64 { + return quotaToMap(*q) +} + +func (q *GlobalQuota) ToMap() map[string]int64 { + return quotaToMap(*q) +} + +func quotaToMap(q interface{}) map[string]int64 { + qMap := make(map[string]int64) + typ := reflect.TypeOf(q) + val := reflect.ValueOf(q) + + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + name := field.Tag.Get("target") + if name == "" { + name = field.Name + } + if name == "-" { + continue + } + value := val.Field(i) + qMap[name] = value.Int() + } + return qMap +} + +type QuotaSettings struct { + Enabled bool + Org *OrgQuota + User *UserQuota + Global *GlobalQuota +} + +func readQuotaSettings() { + // set global defaults. 
+ quota := Cfg.Section("quota") + Quota.Enabled = quota.Key("enabled").MustBool(false) + + // per ORG Limits + Quota.Org = &OrgQuota{ + User: quota.Key("org_user").MustInt64(10), + DataSource: quota.Key("org_data_source").MustInt64(10), + Dashboard: quota.Key("org_dashboard").MustInt64(10), + ApiKey: quota.Key("org_api_key").MustInt64(10), + } + + // per User limits + Quota.User = &UserQuota{ + Org: quota.Key("user_org").MustInt64(10), + } + + // Global Limits + Quota.Global = &GlobalQuota{ + User: quota.Key("global_user").MustInt64(-1), + Org: quota.Key("global_org").MustInt64(-1), + DataSource: quota.Key("global_data_source").MustInt64(-1), + Dashboard: quota.Key("global_dashboard").MustInt64(-1), + ApiKey: quota.Key("global_api_key").MustInt64(-1), + Session: quota.Key("global_session").MustInt64(-1), + } + +} diff --git a/pkg/setting/setting_test.go b/pkg/setting/setting_test.go index 00da9b4f41628..ef44f55551c9c 100644 --- a/pkg/setting/setting_test.go +++ b/pkg/setting/setting_test.go @@ -11,9 +11,11 @@ import ( func TestLoadingSettings(t *testing.T) { Convey("Testing loading settings from ini file", t, func() { + skipStaticRootValidation = true Convey("Given the default ini files", func() { - NewConfigContext(&CommandLineArgs{HomePath: "../../"}) + err := NewConfigContext(&CommandLineArgs{HomePath: "../../"}) + So(err, ShouldBeNil) So(AdminUser, ShouldEqual, "admin") }) diff --git a/pkg/social/social.go b/pkg/social/social.go index 1a00934b93760..4a8cb8bac5d10 100644 --- a/pkg/social/social.go +++ b/pkg/social/social.go @@ -193,7 +193,7 @@ func (s *SocialGithub) FetchPrivateEmail(client *http.Client) (string, error) { Verified bool `json:"verified"` } - emailsUrl := fmt.Sprintf("https://api.github.com/user/emails") + emailsUrl := fmt.Sprintf(s.apiUrl + "/emails") r, err := client.Get(emailsUrl) if err != nil { return "", err @@ -222,7 +222,7 @@ func (s *SocialGithub) FetchTeamMemberships(client *http.Client) ([]int, error) Id int `json:"id"` } - membershipUrl := fmt.Sprintf("https://api.github.com/user/teams") + membershipUrl := fmt.Sprintf(s.apiUrl + "/teams") r, err := client.Get(membershipUrl) if err != nil { return nil, err @@ -249,7 +249,7 @@ func (s *SocialGithub) FetchOrganizations(client *http.Client) ([]string, error) Login string `json:"login"` } - url := fmt.Sprintf("https://api.github.com/user/orgs") + url := fmt.Sprintf(s.apiUrl + "/orgs") r, err := client.Get(url) if err != nil { return nil, err diff --git a/pkg/util/strings.go b/pkg/util/strings.go new file mode 100644 index 0000000000000..7e503a99118a4 --- /dev/null +++ b/pkg/util/strings.go @@ -0,0 +1,18 @@ +package util + +func StringsFallback2(val1 string, val2 string) string { + if val1 != "" { + return val1 + } + return val2 +} + +func StringsFallback3(val1 string, val2 string, val3 string) string { + if val1 != "" { + return val1 + } + if val2 != "" { + return val2 + } + return val3 +} diff --git a/public/app/app.js b/public/app/app.js index 4d1dec4f67319..0637b3aeea76f 100644 --- a/public/app/app.js +++ b/public/app/app.js @@ -1,38 +1,27 @@ -/** - * main app level module - */ define([ 'angular', 'jquery', 'lodash', 'require', - 'config', 'bootstrap', 'angular-route', 'angular-sanitize', 'angular-strap', 'angular-dragdrop', - 'extend-jquery', + 'angular-ui', 'bindonce', + 'app/core/core', ], function (angular, $, _, appLevelRequire) { - "use strict"; - var app = angular.module('grafana', []), - // we will keep a reference to each module defined before boot, so that we can - // go back and allow it to define new 
features later. Once we boot, this will be false - pre_boot_modules = [], - // these are the functions that we need to call to register different - // features if we define them after boot time - register_fns = {}; + var app = angular.module('grafana', []); + var register_fns = {}; + var preBootModules = []; // This stores the grafana version number app.constant('grafanaVersion',"@grafanaVersion@"); - // Use this for cache busting partials - app.constant('cacheBust',"cache-bust="+Date.now()); - /** * Tells the application to watch the module, once bootstraping has completed * the modules controller, service, etc. functions will be overwritten to register directly @@ -41,8 +30,8 @@ function (angular, $, _, appLevelRequire) { * @return {[type]} [description] */ app.useModule = function (module) { - if (pre_boot_modules) { - pre_boot_modules.push(module); + if (preBootModules) { + preBootModules.push(module); } else { _.extend(module, register_fns); } @@ -50,7 +39,6 @@ function (angular, $, _, appLevelRequire) { }; app.config(function($locationProvider, $controllerProvider, $compileProvider, $filterProvider, $provide) { - // this is how the internet told me to dynamically add modules :/ register_fns.controller = $controllerProvider.register; register_fns.directive = $compileProvider.directive; register_fns.factory = $provide.factory; @@ -59,12 +47,15 @@ function (angular, $, _, appLevelRequire) { }); var apps_deps = [ + 'grafana.core', 'ngRoute', 'ngSanitize', '$strap.directives', 'ang-drag-drop', 'grafana', - 'pasvaz.bindonce' + 'pasvaz.bindonce', + 'ui.bootstrap', + 'ui.bootstrap.tpls', ]; var module_types = ['controllers', 'directives', 'factories', 'services', 'filters', 'routes']; @@ -78,13 +69,7 @@ function (angular, $, _, appLevelRequire) { }); var preBootRequires = [ - 'services/all', - 'features/all', - 'controllers/all', - 'directives/all', - 'filters/all', - 'components/partials', - 'routes/all', + 'app/features/all', ]; app.boot = function() { @@ -99,22 +84,18 @@ function (angular, $, _, appLevelRequire) { .ready(function() { angular.bootstrap(document, apps_deps) .invoke(['$rootScope', function ($rootScope) { - _.each(pre_boot_modules, function (module) { + _.each(preBootModules, function (module) { _.extend(module, register_fns); }); - pre_boot_modules = false; + + preBootModules = null; $rootScope.requireContext = appLevelRequire; $rootScope.require = function (deps, fn) { var $scope = this; $scope.requireContext(deps, function () { var deps = _.toArray(arguments); - // Check that this is a valid scope. 
- if($scope.$id) { - $scope.$apply(function () { - fn.apply($scope, deps); - }); - } + fn.apply($scope, deps); }); }; }]); diff --git a/public/app/components/kbn.js b/public/app/components/kbn.js deleted file mode 100644 index d817ada2ebe3f..0000000000000 --- a/public/app/components/kbn.js +++ /dev/null @@ -1,613 +0,0 @@ -define([ - 'jquery', - 'lodash', - 'moment' -], -function($, _, moment) { - 'use strict'; - - var kbn = {}; - kbn.valueFormats = {}; - - kbn.round_interval = function(interval) { - switch (true) { - // 0.5s - case (interval <= 500): - return 100; // 0.1s - // 5s - case (interval <= 5000): - return 1000; // 1s - // 7.5s - case (interval <= 7500): - return 5000; // 5s - // 15s - case (interval <= 15000): - return 10000; // 10s - // 45s - case (interval <= 45000): - return 30000; // 30s - // 3m - case (interval <= 180000): - return 60000; // 1m - // 9m - case (interval <= 450000): - return 300000; // 5m - // 20m - case (interval <= 1200000): - return 600000; // 10m - // 45m - case (interval <= 2700000): - return 1800000; // 30m - // 2h - case (interval <= 7200000): - return 3600000; // 1h - // 6h - case (interval <= 21600000): - return 10800000; // 3h - // 24h - case (interval <= 86400000): - return 43200000; // 12h - // 48h - case (interval <= 172800000): - return 86400000; // 24h - // 1w - case (interval <= 604800000): - return 86400000; // 24h - // 3w - case (interval <= 1814400000): - return 604800000; // 1w - // 2y - case (interval < 3628800000): - return 2592000000; // 30d - default: - return 31536000000; // 1y - } - }; - - kbn.secondsToHms = function(seconds) { - var numyears = Math.floor(seconds / 31536000); - if(numyears){ - return numyears + 'y'; - } - var numdays = Math.floor((seconds % 31536000) / 86400); - if(numdays){ - return numdays + 'd'; - } - var numhours = Math.floor(((seconds % 31536000) % 86400) / 3600); - if(numhours){ - return numhours + 'h'; - } - var numminutes = Math.floor((((seconds % 31536000) % 86400) % 3600) / 60); - if(numminutes){ - return numminutes + 'm'; - } - var numseconds = Math.floor((((seconds % 31536000) % 86400) % 3600) % 60); - if(numseconds){ - return numseconds + 's'; - } - var nummilliseconds = Math.floor(seconds * 1000.0); - if(nummilliseconds){ - return nummilliseconds + 'ms'; - } - - return 'less then a millisecond'; //'just now' //or other string you like; - }; - - kbn.to_percent = function(number,outof) { - return Math.floor((number/outof)*10000)/100 + "%"; - }; - - kbn.addslashes = function(str) { - str = str.replace(/\\/g, '\\\\'); - str = str.replace(/\'/g, '\\\''); - str = str.replace(/\"/g, '\\"'); - str = str.replace(/\0/g, '\\0'); - return str; - }; - - kbn.interval_regex = /(\d+(?:\.\d+)?)([Mwdhmsy])/; - - // histogram & trends - kbn.intervals_in_seconds = { - y: 31536000, - M: 2592000, - w: 604800, - d: 86400, - h: 3600, - m: 60, - s: 1 - }; - - kbn.calculateInterval = function(range, resolution, userInterval) { - var lowLimitMs = 1; // 1 millisecond default low limit - var intervalMs, lowLimitInterval; - - if (userInterval) { - if (userInterval[0] === '>') { - lowLimitInterval = userInterval.slice(1); - lowLimitMs = kbn.interval_to_ms(lowLimitInterval); - } - else { - return userInterval; - } - } - - intervalMs = kbn.round_interval((range.to.valueOf() - range.from.valueOf()) / resolution); - if (lowLimitMs > intervalMs) { - intervalMs = lowLimitMs; - } - - return kbn.secondsToHms(intervalMs / 1000); - }; - - kbn.describe_interval = function (string) { - var matches = string.match(kbn.interval_regex); - if 
(!matches || !_.has(kbn.intervals_in_seconds, matches[2])) { - throw new Error('Invalid interval string, expexcting a number followed by one of "Mwdhmsy"'); - } else { - return { - sec: kbn.intervals_in_seconds[matches[2]], - type: matches[2], - count: parseInt(matches[1], 10) - }; - } - }; - - kbn.interval_to_ms = function(string) { - var info = kbn.describe_interval(string); - return info.sec * 1000 * info.count; - }; - - kbn.interval_to_seconds = function (string) { - var info = kbn.describe_interval(string); - return info.sec * info.count; - }; - - /* This is a simplified version of elasticsearch's date parser */ - kbn.parseDate = function(text) { - if(_.isDate(text)) { - return text; - } - - var time; - var mathString = ""; - var index; - var parseString; - - if (text.substring(0,3) === "now") { - time = new Date(); - mathString = text.substring(3); - } - else if (text.substring(0,5) === 'today') { - time = new Date(); - time.setHours(0,0,0,0); - mathString = text.substring(5); - } - else { - index = text.indexOf("||"); - parseString; - if (index === -1) { - parseString = text; - mathString = ""; // nothing else - } else { - parseString = text.substring(0, index); - mathString = text.substring(index + 2); - } - // We're going to just require ISO8601 timestamps, k? - time = new Date(parseString); - } - - if (!mathString.length) { - return time; - } - - //return [time,parseString,mathString]; - return kbn.parseDateMath(mathString, time); - }; - - kbn._timespanRegex = /^\d+[h,m,M,w,s,H,d]$/; - kbn.isValidTimeSpan = function(str) { - return kbn._timespanRegex.test(str); - }; - - kbn.parseDateMath = function(mathString, time, roundUp) { - var dateTime = moment(time); - for (var i = 0; i < mathString.length;) { - var c = mathString.charAt(i++), - type, - num, - unit; - if (c === '/') { - type = 0; - } else if (c === '+') { - type = 1; - } else if (c === '-') { - type = 2; - } else { - return false; - } - - if (isNaN(mathString.charAt(i))) { - num = 1; - } else { - var numFrom = i; - while (!isNaN(mathString.charAt(i))) { - i++; - } - num = parseInt(mathString.substring(numFrom, i),10); - } - if (type === 0) { - // rounding is only allowed on whole numbers - if (num !== 1) { - return false; - } - } - unit = mathString.charAt(i++); - switch (unit) { - case 'y': - if (type === 0) { - roundUp ? dateTime.endOf('year') : dateTime.startOf('year'); - } else if (type === 1) { - dateTime.add(num, 'years'); - } else if (type === 2) { - dateTime.subtract(num, 'years'); - } - break; - case 'M': - if (type === 0) { - roundUp ? dateTime.endOf('month') : dateTime.startOf('month'); - } else if (type === 1) { - dateTime.add(num, 'months'); - } else if (type === 2) { - dateTime.subtract(num, 'months'); - } - break; - case 'w': - if (type === 0) { - roundUp ? dateTime.endOf('week') : dateTime.startOf('week'); - } else if (type === 1) { - dateTime.add(num, 'weeks'); - } else if (type === 2) { - dateTime.subtract(num, 'weeks'); - } - break; - case 'd': - if (type === 0) { - roundUp ? dateTime.endOf('day') : dateTime.startOf('day'); - } else if (type === 1) { - dateTime.add(num, 'days'); - } else if (type === 2) { - dateTime.subtract(num, 'days'); - } - break; - case 'h': - case 'H': - if (type === 0) { - roundUp ? dateTime.endOf('hour') : dateTime.startOf('hour'); - } else if (type === 1) { - dateTime.add(num, 'hours'); - } else if (type === 2) { - dateTime.subtract(num,'hours'); - } - break; - case 'm': - if (type === 0) { - roundUp ? 
dateTime.endOf('minute') : dateTime.startOf('minute'); - } else if (type === 1) { - dateTime.add(num, 'minutes'); - } else if (type === 2) { - dateTime.subtract(num, 'minutes'); - } - break; - case 's': - if (type === 0) { - roundUp ? dateTime.endOf('second') : dateTime.startOf('second'); - } else if (type === 1) { - dateTime.add(num, 'seconds'); - } else if (type === 2) { - dateTime.subtract(num, 'seconds'); - } - break; - default: - return false; - } - } - return dateTime.toDate(); - }; - - kbn.query_color_dot = function (color, diameter) { - return '
    '; - }; - - kbn.valueFormats.percent = function(size, decimals) { - return kbn.toFixed(size, decimals) + '%'; - }; - - kbn.formatFuncCreator = function(factor, extArray) { - return function(size, decimals, scaledDecimals) { - if (size === null) { - return ""; - } - - var steps = 0; - var limit = extArray.length; - - while (Math.abs(size) >= factor) { - steps++; - size /= factor; - - if (steps >= limit) { return "NA"; } - } - - if (steps > 0 && scaledDecimals !== null) { - decimals = scaledDecimals + (3 * steps); - } - - return kbn.toFixed(size, decimals) + extArray[steps]; - }; - }; - - kbn.toFixed = function(value, decimals) { - if (value === null) { - return ""; - } - - var factor = decimals ? Math.pow(10, Math.max(0, decimals)) : 1; - var formatted = String(Math.round(value * factor) / factor); - - // if exponent return directly - if (formatted.indexOf('e') !== -1 || value === 0) { - return formatted; - } - - // If tickDecimals was specified, ensure that we have exactly that - // much precision; otherwise default to the value's own precision. - if (decimals != null) { - var decimalPos = formatted.indexOf("."); - var precision = decimalPos === -1 ? 0 : formatted.length - decimalPos - 1; - if (precision < decimals) { - return (precision ? formatted : formatted + ".") + (String(factor)).substr(1, decimals - precision); - } - } - - return formatted; - }; - - kbn.valueFormats.bits = kbn.formatFuncCreator(1024, [' b', ' Kib', ' Mib', ' Gib', ' Tib', ' Pib', ' Eib', ' Zib', ' Yib']); - kbn.valueFormats.bytes = kbn.formatFuncCreator(1024, [' B', ' KiB', ' MiB', ' GiB', ' TiB', ' PiB', ' EiB', ' ZiB', ' YiB']); - kbn.valueFormats.kbytes = kbn.formatFuncCreator(1024, [' KiB', ' MiB', ' GiB', ' TiB', ' PiB', ' EiB', ' ZiB', ' YiB']); - kbn.valueFormats.mbytes = kbn.formatFuncCreator(1024, [' MiB', ' GiB', ' TiB', ' PiB', ' EiB', ' ZiB', ' YiB']); - kbn.valueFormats.gbytes = kbn.formatFuncCreator(1024, [' GiB', ' TiB', ' PiB', ' EiB', ' ZiB', ' YiB']); - kbn.valueFormats.bps = kbn.formatFuncCreator(1000, [' bps', ' Kbps', ' Mbps', ' Gbps', ' Tbps', ' Pbps', ' Ebps', ' Zbps', ' Ybps']); - kbn.valueFormats.pps = kbn.formatFuncCreator(1000, [' pps', ' Kpps', ' Mpps', ' Gpps', ' Tpps', ' Ppps', ' Epps', ' Zpps', ' Ypps']); - kbn.valueFormats.Bps = kbn.formatFuncCreator(1000, [' Bps', ' KBps', ' MBps', ' GBps', ' TBps', ' PBps', ' EBps', ' ZBps', ' YBps']); - kbn.valueFormats.short = kbn.formatFuncCreator(1000, ['', ' K', ' Mil', ' Bil', ' Tri', ' Quadr', ' Quint', ' Sext', ' Sept']); - kbn.valueFormats.joule = kbn.formatFuncCreator(1000, [' J', ' kJ', ' MJ', ' GJ', ' TJ', ' PJ', ' EJ', ' ZJ', ' YJ']); - kbn.valueFormats.amp = kbn.formatFuncCreator(1000, [' A', ' kA', ' MA', ' GA', ' TA', ' PA', ' EA', ' ZA', ' YA']); - kbn.valueFormats.volt = kbn.formatFuncCreator(1000, [' V', ' kV', ' MV', ' GV', ' TV', ' PV', ' EV', ' ZV', ' YV']); - kbn.valueFormats.hertz = kbn.formatFuncCreator(1000, [' Hz', ' kHz', ' MHz', ' GHz', ' THz', ' PHz', ' EHz', ' ZHz', ' YHz']); - kbn.valueFormats.watt = kbn.formatFuncCreator(1000, [' W', ' kW', ' MW', ' GW', ' TW', ' PW', ' EW', ' ZW', ' YW']); - kbn.valueFormats.kwatt = kbn.formatFuncCreator(1000, [' kW', ' MW', ' GW', ' TW', ' PW', ' EW', ' ZW', ' YW']); - kbn.valueFormats.watth = kbn.formatFuncCreator(1000, [' Wh', ' kWh', ' MWh', ' GWh', ' TWh', ' PWh', ' EWh', ' ZWh', ' YWh']); - kbn.valueFormats.kwatth = kbn.formatFuncCreator(1000, [' kWh', ' MWh', ' GWh', ' TWh', ' PWh', ' EWh', ' ZWh', ' YWh']); - kbn.valueFormats.ev = kbn.formatFuncCreator(1000, [' eV', ' 
keV', ' MeV', 'GeV', 'TeV', 'PeV', 'EeV', 'ZeV', 'YeV']); - kbn.valueFormats.none = kbn.toFixed; - kbn.valueFormats.celsius = function(value, decimals) { return kbn.toFixed(value, decimals) + ' °C'; }; - kbn.valueFormats.farenheit = function(value, decimals) { return kbn.toFixed(value, decimals) + ' °F'; }; - kbn.valueFormats.humidity = function(value, decimals) { return kbn.toFixed(value, decimals) + ' %H'; }; - kbn.valueFormats.pressurembar = function(value, decimals) { return kbn.toFixed(value, decimals) + ' mbar'; }; - kbn.valueFormats.pressurehpa = function(value, decimals) { return kbn.toFixed(value, decimals) + ' hPa'; }; - kbn.valueFormats.ppm = function(value, decimals) { return kbn.toFixed(value, decimals) + ' ppm'; }; - kbn.valueFormats.velocityms = function(value, decimals) { return kbn.toFixed(value, decimals) + ' m/s'; }; - kbn.valueFormats.velocitykmh = function(value, decimals) { return kbn.toFixed(value, decimals) + ' km/h'; }; - kbn.valueFormats.velocitymph = function(value, decimals) { return kbn.toFixed(value, decimals) + ' mph'; }; - kbn.valueFormats.velocityknot = function(value, decimals) { return kbn.toFixed(value, decimals) + ' kn'; }; - - kbn.roundValue = function (num, decimals) { - if (num === null) { return null; } - var n = Math.pow(10, decimals); - return Math.round((n * num).toFixed(decimals)) / n; - }; - - kbn.toFixedScaled = function(value, decimals, scaledDecimals, additionalDecimals, ext) { - if (scaledDecimals === null) { - return kbn.toFixed(value, decimals) + ext; - } else { - return kbn.toFixed(value, scaledDecimals + additionalDecimals) + ext; - } - }; - - kbn.valueFormats.ms = function(size, decimals, scaledDecimals) { - if (size === null) { return ""; } - - if (Math.abs(size) < 1000) { - return kbn.toFixed(size, decimals) + " ms"; - } - // Less than 1 min - else if (Math.abs(size) < 60000) { - return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " s"); - } - // Less than 1 hour, devide in minutes - else if (Math.abs(size) < 3600000) { - return kbn.toFixedScaled(size / 60000, decimals, scaledDecimals, 5, " min"); - } - // Less than one day, devide in hours - else if (Math.abs(size) < 86400000) { - return kbn.toFixedScaled(size / 3600000, decimals, scaledDecimals, 7, " hour"); - } - // Less than one year, devide in days - else if (Math.abs(size) < 31536000000) { - return kbn.toFixedScaled(size / 86400000, decimals, scaledDecimals, 8, " day"); - } - - return kbn.toFixedScaled(size / 31536000000, decimals, scaledDecimals, 10, " year"); - }; - - kbn.valueFormats.s = function(size, decimals, scaledDecimals) { - if (size === null) { return ""; } - - if (Math.abs(size) < 600) { - return kbn.toFixed(size, decimals) + " s"; - } - // Less than 1 hour, devide in minutes - else if (Math.abs(size) < 3600) { - return kbn.toFixedScaled(size / 60, decimals, scaledDecimals, 1, " min"); - } - // Less than one day, devide in hours - else if (Math.abs(size) < 86400) { - return kbn.toFixedScaled(size / 3600, decimals, scaledDecimals, 4, " hour"); - } - // Less than one week, devide in days - else if (Math.abs(size) < 604800) { - return kbn.toFixedScaled(size / 86400, decimals, scaledDecimals, 5, " day"); - } - // Less than one year, devide in week - else if (Math.abs(size) < 31536000) { - return kbn.toFixedScaled(size / 604800, decimals, scaledDecimals, 6, " week"); - } - - return kbn.toFixedScaled(size / 3.15569e7, decimals, scaledDecimals, 7, " year"); - }; - - kbn.valueFormats['µs'] = function(size, decimals, scaledDecimals) { - if (size === null) { 
return ""; } - - if (Math.abs(size) < 1000) { - return kbn.toFixed(size, decimals) + " µs"; - } - else if (Math.abs(size) < 1000000) { - return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " ms"); - } - else { - return kbn.toFixedScaled(size / 1000000, decimals, scaledDecimals, 6, " s"); - } - }; - - kbn.valueFormats.ns = function(size, decimals, scaledDecimals) { - if (size === null) { return ""; } - - if (Math.abs(size) < 1000) { - return kbn.toFixed(size, decimals) + " ns"; - } - else if (Math.abs(size) < 1000000) { - return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " µs"); - } - else if (Math.abs(size) < 1000000000) { - return kbn.toFixedScaled(size / 1000000, decimals, scaledDecimals, 6, " ms"); - } - else if (Math.abs(size) < 60000000000){ - return kbn.toFixedScaled(size / 1000000000, decimals, scaledDecimals, 9, " s"); - } - else { - return kbn.toFixedScaled(size / 60000000000, decimals, scaledDecimals, 12, " min"); - } - }; - - kbn.slugifyForUrl = function(str) { - return str - .toLowerCase() - .replace(/[^\w ]+/g,'') - .replace(/ +/g,'-'); - }; - - kbn.exportSeriesListToCsv = function(seriesList) { - var text = 'Series;Time;Value\n'; - _.each(seriesList, function(series) { - _.each(series.datapoints, function(dp) { - text += series.alias + ';' + new Date(dp[1]).toISOString() + ';' + dp[0] + '\n'; - }); - }); - var blob = new Blob([text], { type: "text/csv;charset=utf-8" }); - window.saveAs(blob, 'grafana_data_export.csv'); - }; - - kbn.stringToJsRegex = function(str) { - if (str[0] !== '/') { - return new RegExp(str); - } - - var match = str.match(new RegExp('^/(.*?)/(g?i?m?y?)$')); - return new RegExp(match[1], match[2]); - }; - - kbn.getUnitFormats = function() { - return [ - { - text: 'none', - submenu: [ - {text: 'none' , value: 'none'}, - {text: 'short', value: 'short'}, - {text: 'percent', value: 'percent'}, - {text: 'ppm', value: 'ppm'}, - {text: 'dB', value: 'dB'}, - ] - }, - { - text: 'duration', - submenu: [ - {text: 'nanoseconds (ns)' , value: 'ns'}, - {text: 'microseconds (µs)', value: 'µs'}, - {text: 'milliseconds (ms)', value: 'ms'}, - {text: 'seconds (s)', value: 's'}, - {text: 'Hertz (1/s)', value: 'hertz'}, - ] - }, - { - text: 'data', - submenu: [ - {text: 'bits', value: 'bits'}, - {text: 'bytes', value: 'bytes'}, - {text: 'kilobytes', value: 'kbytes'}, - {text: 'megabytes', value: 'mbytes'}, - {text: 'gigabytes', value: 'gbytes'}, - ] - }, - { - text: 'data rate', - submenu: [ - {text: 'packets/sec', value: 'pps'}, - {text: 'bits/sec', value: 'bps'}, - {text: 'bytes/sec', value: 'Bps'}, - ] - }, - { - text: 'energy', - submenu: [ - {text: 'watt (W)', value: 'watt'}, - {text: 'kilowatt (kW)', value: 'kwatt'}, - {text: 'watt-hour (Wh)', value: 'watth'}, - {text: 'kilowatt-hour (kWh)', value: 'kwatth'}, - {text: 'joule (J)', value: 'joule'}, - {text: 'electron volt (eV)', value: 'ev'}, - {text: 'Ampere (A)', value: 'amp'}, - {text: 'Volt (V)', value: 'volt'}, - ] - }, - { - text: 'weather', - submenu: [ - {text: 'Celcius (°C)', value: 'celsius' }, - {text: 'Farenheit (°F)', value: 'farenheit'}, - {text: 'Humidity (%H)', value: 'humidity' }, - {text: 'Pressure (mbar)', value: 'pressurembar' }, - {text: 'Pressure (hPa)', value: 'pressurehpa' }, - ] - }, - { - text: 'velocity', - submenu: [ - {text: 'm/s', value: 'velocityms' }, - {text: 'km/h', value: 'velocitykmh' }, - {text: 'mph', value: 'velocitymph' }, - {text: 'knot (kn)', value: 'velocityknot' }, - ] - }, - ]; - }; - - return kbn; -}); diff --git 
a/public/app/components/require.config.js b/public/app/components/require.config.js deleted file mode 100644 index 12a72dd42e3ec..0000000000000 --- a/public/app/components/require.config.js +++ /dev/null @@ -1,102 +0,0 @@ -require.config({ - urlArgs: 'bust=' + (new Date().getTime()), - baseUrl: 'public/app', - - paths: { - config: 'components/config', - settings: 'components/settings', - kbn: 'components/kbn', - store: 'components/store', - - text: '../vendor/requirejs-text/text', - moment: '../vendor/moment', - filesaver: '../vendor/filesaver', - ZeroClipboard: '../vendor/ZeroClipboard', - angular: '../vendor/angular/angular', - 'angular-route': '../vendor/angular-route/angular-route', - 'angular-sanitize': '../vendor/angular-sanitize/angular-sanitize', - 'angular-dragdrop': '../vendor/angular-native-dragdrop/draganddrop', - 'angular-strap': '../vendor/angular-other/angular-strap', - timepicker: '../vendor/angular-other/timepicker', - datepicker: '../vendor/angular-other/datepicker', - bindonce: '../vendor/angular-bindonce/bindonce', - crypto: '../vendor/crypto.min', - spectrum: '../vendor/spectrum', - - lodash: 'components/lodash.extended', - 'lodash-src': '../vendor/lodash', - bootstrap: '../vendor/bootstrap/bootstrap', - - jquery: '../vendor/jquery/dist/jquery', - - 'extend-jquery': 'components/extend-jquery', - - 'jquery.flot': '../vendor/flot/jquery.flot', - 'jquery.flot.pie': '../vendor/flot/jquery.flot.pie', - 'jquery.flot.events': '../vendor/flot/jquery.flot.events', - 'jquery.flot.selection': '../vendor/flot/jquery.flot.selection', - 'jquery.flot.stack': '../vendor/flot/jquery.flot.stack', - 'jquery.flot.stackpercent':'../vendor/flot/jquery.flot.stackpercent', - 'jquery.flot.time': '../vendor/flot/jquery.flot.time', - 'jquery.flot.crosshair': '../vendor/flot/jquery.flot.crosshair', - 'jquery.flot.fillbelow': '../vendor/flot/jquery.flot.fillbelow', - - modernizr: '../vendor/modernizr-2.6.1', - - 'bootstrap-tagsinput': '../vendor/tagsinput/bootstrap-tagsinput', - }, - shim: { - - spectrum: { - deps: ['jquery'] - }, - - crypto: { - exports: 'Crypto' - }, - - ZeroClipboard: { - exports: 'ZeroClipboard' - }, - - angular: { - deps: ['jquery','config'], - exports: 'angular' - }, - - bootstrap: { - deps: ['jquery'] - }, - - modernizr: { - exports: 'Modernizr' - }, - - jquery: { - exports: 'jQuery' - }, - - // simple dependency declaration - // - 'jquery.flot': ['jquery'], - 'jquery.flot.pie': ['jquery', 'jquery.flot'], - 'jquery.flot.events': ['jquery', 'jquery.flot'], - 'jquery.flot.selection':['jquery', 'jquery.flot'], - 'jquery.flot.stack': ['jquery', 'jquery.flot'], - 'jquery.flot.stackpercent':['jquery', 'jquery.flot'], - 'jquery.flot.time': ['jquery', 'jquery.flot'], - 'jquery.flot.crosshair':['jquery', 'jquery.flot'], - 'jquery.flot.fillbelow':['jquery', 'jquery.flot'], - 'angular-dragdrop': ['jquery', 'angular'], - 'angular-mocks': ['angular'], - 'angular-sanitize': ['angular'], - 'angular-route': ['angular'], - 'angular-strap': ['angular', 'bootstrap','timepicker', 'datepicker'], - 'bindonce': ['angular'], - - timepicker: ['jquery', 'bootstrap'], - datepicker: ['jquery', 'bootstrap'], - - 'bootstrap-tagsinput': ['jquery'], - }, -}); diff --git a/public/app/components/settings.js b/public/app/components/settings.js deleted file mode 100644 index b3a04bb3eecb9..0000000000000 --- a/public/app/components/settings.js +++ /dev/null @@ -1,57 +0,0 @@ -define([ - 'lodash', -], -function (_) { - "use strict"; - - return function Settings (options) { - /** - * To add a setting, you MUST 
define a default. Also, - * THESE ARE ONLY DEFAULTS. - * They are overridden by config.js in the root directory - * @type {Object} - */ - var defaults = { - datasources : {}, - window_title_prefix : 'Grafana - ', - panels : { - 'graph': { path: 'panels/graph', name: 'Graph' }, - 'singlestat': { path: 'panels/singlestat', name: 'Single stat' }, - 'text': { path: 'panels/text', name: 'Text' }, - 'dashlist': { path: 'panels/dashlist', name: 'Dashboard list' }, - }, - new_panel_title: 'no title (click here)', - plugins: {}, - default_route: '/dashboard/file/default.json', - playlist_timespan: "1m", - unsaved_changes_warning: true, - search: { max_results: 100 }, - appSubUrl: "" - }; - - var settings = _.extend({}, defaults, options); - - // var parseBasicAuth = function(datasource) { - // var passwordEnd = datasource.url.indexOf('@'); - // if (passwordEnd > 0) { - // var userStart = datasource.url.indexOf('//') + 2; - // var userAndPassword = datasource.url.substring(userStart, passwordEnd); - // var bytes = crypto.charenc.Binary.stringToBytes(userAndPassword); - // datasource.basicAuth = crypto.util.bytesToBase64(bytes); - // - // var urlHead = datasource.url.substring(0, userStart); - // datasource.url = urlHead + datasource.url.substring(passwordEnd + 1); - // } - // - // return datasource; - // }; - // - // _.each(settings.datasources, function(datasource, key) { - // datasource.name = key; - // if (datasource.url) { parseBasicAuth(datasource); } - // if (datasource.type === 'influxdb') { parseMultipleHosts(datasource); } - // }); - - return settings; - }; -}); diff --git a/public/app/controllers/all.js b/public/app/controllers/all.js deleted file mode 100644 index 99b9a496484f7..0000000000000 --- a/public/app/controllers/all.js +++ /dev/null @@ -1,12 +0,0 @@ -define([ - './grafanaCtrl', - './pulldown', - './search', - './metricKeys', - './inspectCtrl', - './jsonEditorCtrl', - './loginCtrl', - './resetPasswordCtrl', - './sidemenuCtrl', - './errorCtrl', -], function () {}); diff --git a/public/app/controllers/console-ctrl.js b/public/app/controllers/console-ctrl.js deleted file mode 100644 index 8a673d9947666..0000000000000 --- a/public/app/controllers/console-ctrl.js +++ /dev/null @@ -1,108 +0,0 @@ -define([ - 'angular', - 'lodash', - 'moment', - 'store' -], -function (angular, _, moment, store) { - 'use strict'; - - var module = angular.module('grafana.controllers'); - var consoleEnabled = store.getBool('grafanaConsole'); - - if (!consoleEnabled) { - return; - } - - var events = []; - - function ConsoleEvent(type, title, data) { - this.type = type; - this.title = title; - this.data = data; - this.time = moment().format('hh:mm:ss'); - - if (data.config) { - this.method = data.config.method; - this.elapsed = (new Date().getTime() - data.config.$grafana_timestamp) + ' ms'; - if (data.config.params && data.config.params.q) { - this.field2 = data.config.params.q; - } - if (_.isString(data.config.data)) { - this.field2 = data.config.data; - } - if (data.status !== 200) { - this.error = true; - this.field3 = data.data; - } - - if (_.isArray(data.data)) { - this.extractTimeseriesInfo(data.data); - } - } - } - - ConsoleEvent.prototype.extractTimeseriesInfo = function(series) { - if (series.length === 0) { - return; - } - - var points = 0; - var ok = false; - - if (series[0].datapoints) { - points = _.reduce(series, function(memo, val) { - return memo + val.datapoints.length; - }, 0); - ok = true; - } - if (series[0].columns) { - points = _.reduce(series, function(memo, val) { - return memo + 
val.points.length; - }, 0); - ok = true; - } - - if (ok) { - this.field1 = '(' + series.length + ' series'; - this.field1 += ', ' + points + ' points)'; - } - }; - - module.config(function($provide, $httpProvider) { - $provide.factory('mupp', function($q) { - return { - 'request': function(config) { - if (config.inspect) { - config.$grafana_timestamp = new Date().getTime(); - } - return config; - }, - 'response': function(response) { - if (response.config.inspect) { - events.push(new ConsoleEvent(response.config.inspect.type, response.config.url, response)); - } - return response; - }, - 'requestError': function(rejection) { - console.log('requestError', rejection); - return $q.reject(rejection); - }, - 'responseError': function (rejection) { - var inspect = rejection.config.inspect || { type: 'error' }; - events.push(new ConsoleEvent(inspect.type, rejection.config.url, rejection)); - return $q.reject(rejection); - } - }; - }); - - $httpProvider.interceptors.push('mupp'); - }); - - module.controller('ConsoleCtrl', function($scope) { - - $scope.events = events; - - }); - -}); diff --git a/public/app/controllers/inspectCtrl.js b/public/app/controllers/inspectCtrl.js deleted file mode 100644 index d6034516a6b2c..0000000000000 --- a/public/app/controllers/inspectCtrl.js +++ /dev/null @@ -1,86 +0,0 @@ -define([ - 'angular', - 'lodash' -], -function (angular, _) { - 'use strict'; - - var module = angular.module('grafana.controllers'); - - module.controller('InspectCtrl', function($scope) { - var model = $scope.inspector; - - function getParametersFromQueryString(queryString) { - var result = []; - var parameters = queryString.split("&"); - for (var i = 0; i < parameters.length; i++) { - var keyValue = parameters[i].split("="); - if (keyValue[1].length > 0) { - result.push({ key: keyValue[0], value: window.unescape(keyValue[1]) }); - } - } - return result; - } - - $scope.init = function () { - $scope.editor = { index: 0 }; - - if (!model.error) { - return; - } - - if (_.isString(model.error.data)) { - $scope.response = model.error.data; - } - - if (model.error.config && model.error.config.params) { - $scope.request_parameters = _.map(model.error.config.params, function(value, key) { - return { key: key, value: value}; - }); - } - - if (model.error.stack) { - $scope.editor.index = 2; - $scope.stack_trace = model.error.stack; - $scope.message = model.error.message; - } - else if (model.error.config && model.error.config.data) { - $scope.editor.index = 1; - - $scope.request_parameters = getParametersFromQueryString(model.error.config.data); - - if (model.error.data.indexOf('DOCTYPE') !== -1) { - $scope.response_html = model.error.data; - } - } - }; - - }); - - angular - .module('grafana.directives') - .directive('iframeContent', function($parse) { - return { - restrict: 'A', - link: function($scope, elem, attrs) { - var getter = $parse(attrs.iframeContent), value = getter($scope); - - $scope.$on("$destroy",function() { - elem.remove(); - }); - - var iframe = document.createElement('iframe'); - iframe.width = '100%'; - iframe.height = '400px'; - iframe.style.border = 'none'; - iframe.src = 'about:blank'; - elem.append(iframe); - - iframe.contentWindow.document.open('text/html', 'replace'); - iframe.contentWindow.document.write(value); - iframe.contentWindow.document.close(); - } - }; - }); - -}); diff --git a/public/app/controllers/metricKeys.js b/public/app/controllers/metricKeys.js deleted file mode 100644 index be743112cf869..0000000000000 --- a/public/app/controllers/metricKeys.js +++ /dev/null 
@@ -1,186 +0,0 @@ -define([ - 'angular', - 'lodash', - 'config' -], -function (angular, _, config) { - 'use strict'; - - var module = angular.module('grafana.controllers'); - - module.controller('MetricKeysCtrl', function($scope, $http, $q) { - var elasticSearchUrlForMetricIndex = config.elasticsearch + '/' + config.grafana_metrics_index + '/'; - var httpOptions = {}; - if (config.elasticsearchBasicAuth) { - httpOptions.withCredentials = true; - httpOptions.headers = { - "Authorization": "Basic " + config.elasticsearchBasicAuth - }; - } - $scope.init = function () { - $scope.metricPath = "prod.apps.api.boobarella.*"; - $scope.metricCounter = 0; - }; - - $scope.createIndex = function () { - $scope.errorText = null; - $scope.infoText = null; - - deleteIndex() - .then(createIndex) - .then(function () { - $scope.infoText = "Index created!"; - }) - .then(null, function (err) { - $scope.errorText = angular.toJson(err); - }); - }; - - $scope.loadMetricsFromPath = function() { - $scope.errorText = null; - $scope.infoText = null; - $scope.metricCounter = 0; - - return loadMetricsRecursive($scope.metricPath) - .then(function() { - $scope.infoText = "Indexing completed!"; - }, function(err) { - $scope.errorText = "Error: " + err; - }); - }; - - $scope.loadAll = function() { - $scope.infoText = "Fetching all metrics from graphite..."; - - getFromEachGraphite('/metrics/index.json', saveMetricsArray) - .then(function() { - $scope.infoText = "Indexing complete!"; - }).then(null, function(err) { - $scope.errorText = err; - }); - }; - - function getFromEachGraphite(request, data_callback, error_callback) { - return $q.all(_.map(config.datasources, function(datasource) { - if (datasource.type = 'graphite') { - return $http.get(datasource.url + request) - .then(data_callback, error_callback); - } - })); - } - - function saveMetricsArray(data, currentIndex) { - if (!data && !data.data && data.data.length === 0) { - return $q.reject('No metrics from graphite'); - } - - if (data.data.length === currentIndex) { - return $q.when('done'); - } - - currentIndex = currentIndex || 0; - - return saveMetricKey(data.data[currentIndex]) - .then(function() { - return saveMetricsArray(data, currentIndex + 1); - }); - } - - function deleteIndex() - { - var deferred = $q.defer(); - $http.delete(elasticSearchUrlForMetricIndex, httpOptions) - .success(function() { - deferred.resolve('ok'); - }) - .error(function(data, status) { - if (status === 404) { - deferred.resolve('ok'); - } - else { - deferred.reject('elastic search returned unexpected error'); - } - }); - - return deferred.promise; - } - - function createIndex() - { - return $http.put(elasticSearchUrlForMetricIndex, { - settings: { - analysis: { - analyzer: { - metric_path_ngram : { tokenizer : "my_ngram_tokenizer" } - }, - tokenizer: { - my_ngram_tokenizer : { - type : "nGram", - min_gram : "3", - max_gram : "8", - token_chars: ["letter", "digit", "punctuation", "symbol"] - } - } - } - }, - mappings: { - metricKey: { - properties: { - metricPath: { - type: "multi_field", - fields: { - "metricPath": { type: "string", index: "analyzed", index_analyzer: "standard" }, - "metricPath_ng": { type: "string", index: "analyzed", index_analyzer: "metric_path_ngram" } - } - } - } - } - } - }, httpOptions); - } - - function receiveMetric(result) { - var data = result.data; - if (!data || data.length === 0) { - console.log('no data'); - return; - } - - var funcs = _.map(data, function(metric) { - if (metric.expandable) { - return loadMetricsRecursive(metric.id + ".*"); - } - if 
(metric.leaf) { - return saveMetricKey(metric.id); - } - }); - - return $q.all(funcs); - } - - function saveMetricKey(metricId) { - - // Create request with id as title. Rethink this. - var request = $scope.ejs.Document(config.grafana_metrics_index, 'metricKey', metricId).source({ - metricPath: metricId - }); - - return request.doIndex( - function() { - $scope.infoText = "Indexing " + metricId; - $scope.metricCounter = $scope.metricCounter + 1; - }, - function() { - $scope.errorText = "failed to save metric " + metricId; - } - ); - } - - function loadMetricsRecursive(metricPath) - { - return getFromEachGraphite('/metrics/find/?query=' + metricPath, receiveMetric); - } - - }); - -}); diff --git a/public/app/controllers/pulldown.js b/public/app/controllers/pulldown.js deleted file mode 100644 index 898d80c047943..0000000000000 --- a/public/app/controllers/pulldown.js +++ /dev/null @@ -1,42 +0,0 @@ -define([ - 'angular', - 'app', - 'lodash' -], -function (angular, app, _) { - 'use strict'; - - var module = angular.module('grafana.controllers'); - - module.controller('PulldownCtrl', function($scope, $rootScope, $timeout) { - var _d = { - collapse: false, - notice: false, - enable: true - }; - - _.defaults($scope.pulldown,_d); - - $scope.init = function() { - // Provide a combined skeleton for panels that must interact with panel and row. - // This might create name spacing issues. - $scope.panel = $scope.pulldown; - $scope.row = $scope.pulldown; - }; - - $scope.toggle_pulldown = function(pulldown) { - pulldown.collapse = pulldown.collapse ? false : true; - if (!pulldown.collapse) { - $timeout(function() { - $scope.$broadcast('render'); - }); - } else { - $scope.row.notice = false; - } - }; - - $scope.init(); - - }); - -}); \ No newline at end of file diff --git a/public/app/components/config.js b/public/app/core/config.js similarity index 88% rename from public/app/components/config.js rename to public/app/core/config.js index e26a55e097b97..f8e7bb228a2c5 100644 --- a/public/app/components/config.js +++ b/public/app/core/config.js @@ -1,5 +1,5 @@ define([ - 'settings', + 'app/core/settings', ], function (Settings) { "use strict"; diff --git a/public/app/core/controllers/all.js b/public/app/core/controllers/all.js new file mode 100644 index 0000000000000..d22010cffdc88 --- /dev/null +++ b/public/app/core/controllers/all.js @@ -0,0 +1,12 @@ +define([ + './grafana_ctrl', + './search_ctrl', + './inspect_ctrl', + './json_editor_ctrl', + './login_ctrl', + './invited_ctrl', + './signup_ctrl', + './reset_password_ctrl', + './sidemenu_ctrl', + './error_ctrl', +], function () {}); diff --git a/public/app/controllers/errorCtrl.js b/public/app/core/controllers/error_ctrl.js similarity index 58% rename from public/app/controllers/errorCtrl.js rename to public/app/core/controllers/error_ctrl.js index 9e70efb3ee42b..9816928fa6c88 100644 --- a/public/app/controllers/errorCtrl.js +++ b/public/app/core/controllers/error_ctrl.js @@ -1,14 +1,11 @@ define([ 'angular', - 'app', - 'lodash' + '../core_module', ], -function (angular) { +function (angular, coreModule) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('ErrorCtrl', function($scope, contextSrv) { + coreModule.controller('ErrorCtrl', function($scope, contextSrv) { var showSideMenu = contextSrv.sidemenu; contextSrv.sidemenu = false; diff --git a/public/app/controllers/grafanaCtrl.js b/public/app/core/controllers/grafana_ctrl.js similarity index 87% rename from public/app/controllers/grafanaCtrl.js rename to 
public/app/core/controllers/grafana_ctrl.js index 16e505a68dcda..baafd15938e59 100644 --- a/public/app/controllers/grafanaCtrl.js +++ b/public/app/core/controllers/grafana_ctrl.js @@ -1,16 +1,15 @@ define([ 'angular', - 'config', 'lodash', 'jquery', - 'store', + '../core_module', + 'app/core/config', + 'app/core/store', ], -function (angular, config, _, $, store) { +function (angular, _, $, coreModule, config, store) { "use strict"; - var module = angular.module('grafana.controllers'); - - module.controller('GrafanaCtrl', function($scope, alertSrv, utilSrv, $rootScope, $controller, contextSrv) { + coreModule.controller('GrafanaCtrl', function($scope, alertSrv, utilSrv, $rootScope, $controller, contextSrv) { $scope.init = function() { $scope.contextSrv = contextSrv; @@ -33,9 +32,16 @@ function (angular, config, _, $, store) { $controller('DashboardCtrl', { $scope: viewScope }).init(dashboardData); }; - $rootScope.onAppEvent = function(name, callback) { + $rootScope.onAppEvent = function(name, callback, localScope) { var unbind = $rootScope.$on(name, callback); - this.$on('$destroy', unbind); + var callerScope = this; + if (callerScope.$id === 1 && !localScope) { + console.log('warning rootScope onAppEvent called without localscope'); + } + if (localScope) { + callerScope = localScope; + } + callerScope.$on('$destroy', unbind); }; $rootScope.appEvent = function(name, payload) { diff --git a/public/app/core/controllers/inspect_ctrl.js b/public/app/core/controllers/inspect_ctrl.js new file mode 100644 index 0000000000000..81cfaf64a85bd --- /dev/null +++ b/public/app/core/controllers/inspect_ctrl.js @@ -0,0 +1,67 @@ +define([ + 'angular', + 'lodash', + 'jquery', + '../core_module', +], +function (angular, _, $, coreModule) { + 'use strict'; + + coreModule.controller('InspectCtrl', function($scope) { + var model = $scope.inspector; + + function getParametersFromQueryString(queryString) { + var result = []; + var parameters = queryString.split("&"); + for (var i = 0; i < parameters.length; i++) { + var keyValue = parameters[i].split("="); + if (keyValue[1].length > 0) { + result.push({ key: keyValue[0], value: window.unescape(keyValue[1]) }); + } + } + return result; + } + + $scope.init = function () { + $scope.editor = { index: 0 }; + + if (!model.error) { + return; + } + + if (_.isString(model.error.data)) { + $scope.response = $("
    " + model.error.data + "
    ").text(); + } else if (model.error.data) { + $scope.response = angular.toJson(model.error.data, true); + } else if (model.error.message) { + $scope.message = model.error.message; + } + + if (model.error.config && model.error.config.params) { + $scope.request_parameters = _.map(model.error.config.params, function(value, key) { + return { key: key, value: value}; + }); + } + + if (model.error.stack) { + $scope.editor.index = 2; + $scope.stack_trace = model.error.stack; + $scope.message = model.error.message; + } + + if (model.error.config && model.error.config.data) { + $scope.editor.index = 1; + + if (_.isString(model.error.config.data)) { + $scope.request_parameters = getParametersFromQueryString(model.error.config.data); + } else { + $scope.request_parameters = _.map(model.error.config.data, function(value, key) { + return {key: key, value: angular.toJson(value, true)}; + }); + } + } + }; + + }); + +}); diff --git a/public/app/core/controllers/invited_ctrl.js b/public/app/core/controllers/invited_ctrl.js new file mode 100644 index 0000000000000..540cb01ca1cea --- /dev/null +++ b/public/app/core/controllers/invited_ctrl.js @@ -0,0 +1,38 @@ +define([ + 'angular', + '../core_module', + 'app/core/config', +], +function (angular, coreModule, config) { + 'use strict'; + + coreModule.controller('InvitedCtrl', function($scope, $routeParams, contextSrv, backendSrv) { + contextSrv.sidemenu = false; + $scope.formModel = {}; + + $scope.init = function() { + backendSrv.get('/api/user/invite/' + $routeParams.code).then(function(invite) { + $scope.formModel.name = invite.name; + $scope.formModel.email = invite.email; + $scope.formModel.username = invite.email; + $scope.formModel.inviteCode = $routeParams.code; + + $scope.greeting = invite.name || invite.email || invite.username; + $scope.invitedBy = invite.invitedBy; + }); + }; + + $scope.submit = function() { + if (!$scope.inviteForm.$valid) { + return; + } + + backendSrv.post('/api/user/invite/complete', $scope.formModel).then(function() { + window.location.href = config.appSubUrl + '/'; + }); + }; + + $scope.init(); + + }); +}); diff --git a/public/app/controllers/jsonEditorCtrl.js b/public/app/core/controllers/json_editor_ctrl.js similarity index 68% rename from public/app/controllers/jsonEditorCtrl.js rename to public/app/core/controllers/json_editor_ctrl.js index 60bda8514b7f2..0bfd5fcfb056a 100644 --- a/public/app/controllers/jsonEditorCtrl.js +++ b/public/app/core/controllers/json_editor_ctrl.js @@ -1,13 +1,11 @@ define([ 'angular', - 'lodash' + '../core_module', ], -function (angular) { +function (angular, coreModule) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('JsonEditorCtrl', function($scope) { + coreModule.controller('JsonEditorCtrl', function($scope) { $scope.json = angular.toJson($scope.object, true); $scope.canUpdate = $scope.updateHandler !== void 0; diff --git a/public/app/controllers/loginCtrl.js b/public/app/core/controllers/login_ctrl.js similarity index 81% rename from public/app/controllers/loginCtrl.js rename to public/app/core/controllers/login_ctrl.js index 40e8009b399ec..22cb2c6f04b66 100644 --- a/public/app/controllers/loginCtrl.js +++ b/public/app/core/controllers/login_ctrl.js @@ -1,13 +1,12 @@ define([ 'angular', - 'config', + '../core_module', + 'app/core/config', ], -function (angular, config) { +function (angular, coreModule, config) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('LoginCtrl', function($scope, backendSrv, 
contextSrv, $location) { + coreModule.controller('LoginCtrl', function($scope, backendSrv, contextSrv, $location) { $scope.formModel = { user: '', email: '', @@ -58,8 +57,12 @@ function (angular, config) { return; } - backendSrv.post('/api/user/signup', $scope.formModel).then(function() { - window.location.href = config.appSubUrl + '/'; + backendSrv.post('/api/user/signup', $scope.formModel).then(function(result) { + if (result.status === 'SignUpCreated') { + $location.path('/signup').search({email: $scope.formModel.email}); + } else { + window.location.href = config.appSubUrl + '/'; + } }); }; diff --git a/public/app/controllers/resetPasswordCtrl.js b/public/app/core/controllers/reset_password_ctrl.js similarity index 84% rename from public/app/controllers/resetPasswordCtrl.js rename to public/app/core/controllers/reset_password_ctrl.js index ed693f0d45afe..d414b05945877 100644 --- a/public/app/controllers/resetPasswordCtrl.js +++ b/public/app/core/controllers/reset_password_ctrl.js @@ -1,13 +1,11 @@ define([ 'angular', + '../core_module', ], -function (angular) { +function (angular, coreModule) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('ResetPasswordCtrl', function($scope, contextSrv, backendSrv, $location) { - + coreModule.controller('ResetPasswordCtrl', function($scope, contextSrv, backendSrv, $location) { contextSrv.sidemenu = false; $scope.formModel = {}; $scope.mode = 'send'; diff --git a/public/app/controllers/search.js b/public/app/core/controllers/search_ctrl.js similarity index 92% rename from public/app/controllers/search.js rename to public/app/core/controllers/search_ctrl.js index a762af0f88734..bbf869b9f3881 100644 --- a/public/app/controllers/search.js +++ b/public/app/core/controllers/search_ctrl.js @@ -1,14 +1,13 @@ define([ 'angular', 'lodash', - 'config', + '../core_module', + 'app/core/config', ], -function (angular, _, config) { +function (angular, _, coreModule, config) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('SearchCtrl', function($scope, $location, $timeout, backendSrv) { + coreModule.controller('SearchCtrl', function($scope, $location, $timeout, backendSrv) { $scope.init = function() { $scope.giveSearchFocus = 0; @@ -17,10 +16,6 @@ function (angular, _, config) { $scope.query = { query: '', tag: [], starred: false }; $scope.currentSearchId = 0; - if ($scope.dashboardViewState.fullscreen) { - $scope.exitFullscreen(); - } - $timeout(function() { $scope.giveSearchFocus = $scope.giveSearchFocus + 1; $scope.query.query = ''; diff --git a/public/app/controllers/sidemenuCtrl.js b/public/app/core/controllers/sidemenu_ctrl.js similarity index 93% rename from public/app/controllers/sidemenuCtrl.js rename to public/app/core/controllers/sidemenu_ctrl.js index b7ba32f0d35d1..c2ee868323fb3 100644 --- a/public/app/controllers/sidemenuCtrl.js +++ b/public/app/core/controllers/sidemenu_ctrl.js @@ -2,14 +2,13 @@ define([ 'angular', 'lodash', 'jquery', - 'config', + '../core_module', + 'app/core/config', ], -function (angular, _, $, config) { +function (angular, _, $, coreModule, config) { 'use strict'; - var module = angular.module('grafana.controllers'); - - module.controller('SideMenuCtrl', function($scope, $location, contextSrv, backendSrv) { + coreModule.controller('SideMenuCtrl', function($scope, $location, contextSrv, backendSrv) { $scope.getUrl = function(url) { return config.appSubUrl + url; diff --git a/public/app/core/controllers/signup_ctrl.ts 
b/public/app/core/controllers/signup_ctrl.ts new file mode 100644 index 0000000000000..9c18b121612dc --- /dev/null +++ b/public/app/core/controllers/signup_ctrl.ts @@ -0,0 +1,51 @@ +/// + +import angular = require('angular'); +import config = require('app/core/config'); +import coreModule = require('../core_module'); + +export class SignUpCtrl { + + constructor( + private $scope : any, + private $location : any, + private contextSrv : any, + private backendSrv : any) { + + contextSrv.sidemenu = false; + $scope.ctrl = this; + + $scope.formModel = {}; + + var params = $location.search(); + $scope.formModel.orgName = params.email; + $scope.formModel.email = params.email; + $scope.formModel.username = params.email; + $scope.formModel.code = params.code; + + $scope.verifyEmailEnabled = false; + $scope.autoAssignOrg = false; + + backendSrv.get('/api/user/signup/options').then(options => { + $scope.verifyEmailEnabled = options.verifyEmailEnabled; + $scope.autoAssignOrg = options.autoAssignOrg; + }); + } + + submit () { + if (!this.$scope.signUpForm.$valid) { + return; + } + + this.backendSrv.post('/api/user/signup/step2', this.$scope.formModel).then(rsp => { + if (rsp.code === 'redirect-to-select-org') { + window.location.href = config.appSubUrl + '/profile/select-org?signup=1'; + } else { + window.location.href = config.appSubUrl + '/'; + } + }); + }; +} + +coreModule.controller('SignUpCtrl', SignUpCtrl); + diff --git a/public/app/core/core.ts b/public/app/core/core.ts new file mode 100644 index 0000000000000..9b726de271aef --- /dev/null +++ b/public/app/core/core.ts @@ -0,0 +1,28 @@ +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// + +/// +/// +/// +/// + +export * from './directives/array_join' +export * from './directives/give_focus' +export * from './filters/filters' + + diff --git a/public/app/core/core_module.ts b/public/app/core/core_module.ts new file mode 100644 index 0000000000000..926036c5149b3 --- /dev/null +++ b/public/app/core/core_module.ts @@ -0,0 +1,5 @@ +/// + +import angular = require('angular'); + +export = angular.module('grafana.core', ['ngRoute']); diff --git a/public/app/directives/annotationTooltip.js b/public/app/core/directives/annotation_tooltip.js similarity index 68% rename from public/app/directives/annotationTooltip.js rename to public/app/core/directives/annotation_tooltip.js index 25059d0827422..c79ca97189ce3 100644 --- a/public/app/directives/annotationTooltip.js +++ b/public/app/core/directives/annotation_tooltip.js @@ -1,25 +1,34 @@ define([ - 'angular', 'jquery', - 'lodash' + 'lodash', + '../core_module', ], -function (angular, $, _) { +function ($, _, coreModule) { 'use strict'; - angular - .module('grafana.directives') - .directive('annotationTooltip', function($sanitize, dashboardSrv, $compile) { + coreModule.directive('annotationTooltip', function($sanitize, dashboardSrv, $compile) { + + function sanitizeString(str) { + try { + return $sanitize(str); + } + catch(err) { + console.log('Could not sanitize annotation string, html escaping instead'); + return _.escape(str); + } + } + return { link: function (scope, element) { var event = scope.event; - var title = $sanitize(event.title); + var title = sanitizeString(event.title); var dashboard = dashboardSrv.getCurrent(); var time = '' + dashboard.formatDate(event.min) + ''; var tooltip = '
    ' + title + ' ' + time + '
    ' ; if (event.text) { - var text = $sanitize(event.text); + var text = sanitizeString(event.text); tooltip += text.replace(/\n/g, '
    ') + '
    '; } diff --git a/public/app/core/directives/array_join.ts b/public/app/core/directives/array_join.ts new file mode 100644 index 0000000000000..b9fb15406f026 --- /dev/null +++ b/public/app/core/directives/array_join.ts @@ -0,0 +1,34 @@ +/// + +import angular = require('angular'); +import _ = require('lodash'); +import coreModule = require('../core_module'); + +export function arrayJoin() { + 'use strict'; + + return { + restrict: 'A', + require: 'ngModel', + link: function(scope, element, attr, ngModel) { + + function split_array(text) { + return (text || '').split(','); + } + + function join_array(text) { + if (_.isArray(text)) { + return (text || '').join(','); + } else { + return text; + } + } + + ngModel.$parsers.push(split_array); + ngModel.$formatters.push(join_array); + } + }; +} + +coreModule.directive('arrayJoin', arrayJoin); + diff --git a/public/app/core/directives/body_class.js b/public/app/core/directives/body_class.js new file mode 100644 index 0000000000000..3a0699c297ead --- /dev/null +++ b/public/app/core/directives/body_class.js @@ -0,0 +1,40 @@ +define([ + 'lodash', + 'jquery', + '../core_module', +], +function (_, $, coreModule) { + 'use strict'; + + coreModule.directive('bodyClass', function() { + return { + link: function($scope, elem) { + + var lastHideControlsVal; + + // tooltip removal fix + $scope.$on("$routeChangeSuccess", function() { + $("#tooltip, .tooltip").remove(); + }); + + $scope.$watch('dashboard.hideControls', function() { + if (!$scope.dashboard) { + return; + } + + var hideControls = $scope.dashboard.hideControls || $scope.playlist_active; + + if (lastHideControlsVal !== hideControls) { + elem.toggleClass('hide-controls', hideControls); + lastHideControlsVal = hideControls; + } + }); + + $scope.$watch('playlistSrv', function(newValue) { + elem.toggleClass('playlist-active', _.isObject(newValue)); + }); + } + }; + }); + +}); diff --git a/public/app/core/directives/config_modal.js b/public/app/core/directives/config_modal.js new file mode 100644 index 0000000000000..36129b0cc96c3 --- /dev/null +++ b/public/app/core/directives/config_modal.js @@ -0,0 +1,46 @@ +define([ + 'lodash', + 'jquery', + '../core_module', +], +function (_, $, coreModule) { + 'use strict'; + + coreModule.directive('configModal', function($modal, $q, $timeout) { + return { + restrict: 'A', + link: function(scope, elem, attrs) { + var partial = attrs.configModal; + var id = '#' + partial.replace('.html', '').replace(/[\/|\.|:]/g, '-') + '-' + scope.$id; + + elem.bind('click',function() { + if ($(id).length) { + elem.attr('data-target', id).attr('data-toggle', 'modal'); + scope.$apply(function() { scope.$broadcast('modal-opened'); }); + return; + } + + var panelModal = $modal({ + template: partial, + persist: false, + show: false, + scope: scope.$new(), + keyboard: false + }); + + $q.when(panelModal).then(function(modalEl) { + elem.attr('data-target', id).attr('data-toggle', 'modal'); + + $timeout(function () { + if (!modalEl.data('modal').isShown) { + modalEl.modal('show'); + } + }, 50); + }); + + scope.$apply(); + }); + } + }; + }); +}); diff --git a/public/app/directives/confirmClick.js b/public/app/core/directives/confirm_click.js similarity index 75% rename from public/app/directives/confirmClick.js rename to public/app/core/directives/confirm_click.js index a3fe1af29b436..f73a847af4511 100644 --- a/public/app/directives/confirmClick.js +++ b/public/app/core/directives/confirm_click.js @@ -1,13 +1,10 @@ define([ - 'angular', - 'kbn' + '../core_module', ], -function (angular) 
{ +function (coreModule) { 'use strict'; - var module = angular.module('grafana.directives'); - - module.directive('confirmClick', function() { + coreModule.directive('confirmClick', function() { return { restrict: 'A', link: function(scope, elem, attrs) { @@ -23,4 +20,4 @@ function (angular) { }, }; }); -}); \ No newline at end of file +}); diff --git a/public/app/core/directives/dash_edit_link.js b/public/app/core/directives/dash_edit_link.js new file mode 100644 index 0000000000000..9ab74fe6d742e --- /dev/null +++ b/public/app/core/directives/dash_edit_link.js @@ -0,0 +1,105 @@ +define([ + 'jquery', + '../core_module', +], +function ($, coreModule) { + 'use strict'; + + var editViewMap = { + 'settings': { src: 'app/features/dashboard/partials/settings.html', title: "Settings" }, + 'annotations': { src: 'app/features/annotations/partials/editor.html', title: "Annotations" }, + 'templating': { src: 'app/features/templating/partials/editor.html', title: "Templating" } + }; + + coreModule.directive('dashEditorLink', function($timeout) { + return { + restrict: 'A', + link: function(scope, elem, attrs) { + var partial = attrs.dashEditorLink; + + elem.bind('click',function() { + $timeout(function() { + var editorScope = attrs.editorScope === 'isolated' ? null : scope; + scope.appEvent('show-dash-editor', { src: partial, scope: editorScope }); + }); + }); + } + }; + }); + + coreModule.directive('dashEditorView', function($compile, $location) { + return { + restrict: 'A', + link: function(scope, elem) { + var editorScope; + var lastEditor; + + function hideEditorPane() { + if (editorScope) { + scope.appEvent('dash-editor-hidden', lastEditor); + editorScope.dismiss(); + } + } + + function showEditorPane(evt, payload, editview) { + if (editview) { + scope.contextSrv.editview = editViewMap[editview]; + payload.src = scope.contextSrv.editview.src; + } + + if (lastEditor === payload.src) { + hideEditorPane(); + return; + } + + hideEditorPane(); + + lastEditor = payload.src; + editorScope = payload.scope ? payload.scope.$new() : scope.$new(); + + editorScope.dismiss = function() { + editorScope.$destroy(); + elem.empty(); + lastEditor = null; + editorScope = null; + + if (editview) { + var urlParams = $location.search(); + if (editview === urlParams.editview) { + delete urlParams.editview; + $location.search(urlParams); + } + } + }; + + var src = "'" + payload.src + "'"; + var cssClass = payload.cssClass || 'gf-box'; + var view = $('
    '); + + if (payload.cssClass) { + view.addClass(payload.cssClass); + } + + elem.append(view); + $compile(elem.contents())(editorScope); + } + + scope.$watch("dashboardViewState.state.editview", function(newValue, oldValue) { + if (newValue) { + showEditorPane(null, {}, newValue); + } else if (oldValue) { + scope.contextSrv.editview = null; + if (lastEditor === editViewMap[oldValue]) { + hideEditorPane(); + } + } + }); + + scope.contextSrv.editview = null; + scope.$on("$destroy", hideEditorPane); + scope.onAppEvent('hide-dash-editor', hideEditorPane); + scope.onAppEvent('show-dash-editor', showEditorPane); + } + }; + }); +}); diff --git a/public/app/directives/dashUpload.js b/public/app/core/directives/dash_upload.js similarity index 89% rename from public/app/directives/dashUpload.js rename to public/app/core/directives/dash_upload.js index 89d52c75916be..ed4253c71caf7 100644 --- a/public/app/directives/dashUpload.js +++ b/public/app/core/directives/dash_upload.js @@ -1,13 +1,11 @@ define([ - 'angular', - 'kbn' + '../core_module', + 'app/core/utils/kbn', ], -function (angular, kbn) { +function (coreModule, kbn) { 'use strict'; - var module = angular.module('grafana.directives'); - - module.directive('dashUpload', function(timer, alertSrv, $location) { + coreModule.directive('dashUpload', function(timer, alertSrv, $location) { return { restrict: 'A', link: function(scope) { diff --git a/public/app/core/directives/dropdown_typeahead.js b/public/app/core/directives/dropdown_typeahead.js new file mode 100644 index 0000000000000..ad484bb18d77e --- /dev/null +++ b/public/app/core/directives/dropdown_typeahead.js @@ -0,0 +1,123 @@ +define([ + 'lodash', + 'jquery', + '../core_module', +], +function (_, $, coreModule) { + 'use strict'; + + coreModule.directive('dropdownTypeahead', function($compile) { + + var inputTemplate = ''; + + var buttonTemplate = ''; + + return { + scope: { + menuItems: "=dropdownTypeahead", + dropdownTypeaheadOnSelect: "&dropdownTypeaheadOnSelect", + model: '=ngModel' + }, + link: function($scope, elem, attrs) { + var $input = $(inputTemplate); + var $button = $(buttonTemplate); + $input.appendTo(elem); + $button.appendTo(elem); + + if (attrs.linkText) { + $button.html(attrs.linkText); + } + + if (attrs.ngModel) { + $scope.$watch('model', function(newValue) { + _.each($scope.menuItems, function(item) { + _.each(item.submenu, function(subItem) { + if (subItem.value === newValue) { + $button.html(subItem.text); + } + }); + }); + }); + } + + var typeaheadValues = _.reduce($scope.menuItems, function(memo, value, index) { + if (!value.submenu) { + value.click = 'menuItemSelected(' + index + ')'; + memo.push(value.text); + } else { + _.each(value.submenu, function(item, subIndex) { + item.click = 'menuItemSelected(' + index + ',' + subIndex + ')'; + memo.push(value.text + ' ' + item.text); + }); + } + return memo; + }, []); + + $scope.menuItemSelected = function(index, subIndex) { + var menuItem = $scope.menuItems[index]; + var payload = {$item: menuItem}; + if (menuItem.submenu && subIndex !== void 0) { + payload.$subItem = menuItem.submenu[subIndex]; + } + $scope.dropdownTypeaheadOnSelect(payload); + }; + + $input.attr('data-provide', 'typeahead'); + $input.typeahead({ + source: typeaheadValues, + minLength: 1, + items: 10, + updater: function (value) { + var result = {}; + _.each($scope.menuItems, function(menuItem) { + result.$item = menuItem; + + _.each(menuItem.submenu, function(submenuItem) { + if (value === (menuItem.text + ' ' + submenuItem.text)) { + result.$subItem 
= submenuItem; + } + }); + }); + + if (result.$item) { + $scope.$apply(function() { + $scope.dropdownTypeaheadOnSelect(result); + }); + } + + $input.trigger('blur'); + return ''; + } + }); + + $button.click(function() { + $button.hide(); + $input.show(); + $input.focus(); + }); + + $input.keyup(function() { + elem.toggleClass('open', $input.val() === ''); + }); + + $input.blur(function() { + $input.hide(); + $input.val(''); + $button.show(); + $button.focus(); + // clicking the function dropdown menu wont + // work if you remove class at once + setTimeout(function() { + elem.removeClass('open'); + }, 200); + }); + + $compile(elem.contents())($scope); + } + }; + }); +}); diff --git a/public/app/core/directives/give_focus.ts b/public/app/core/directives/give_focus.ts new file mode 100644 index 0000000000000..1a00f2b707809 --- /dev/null +++ b/public/app/core/directives/give_focus.ts @@ -0,0 +1,28 @@ +/// + +import angular = require('angular'); +import coreModule = require('../core_module'); + +coreModule.directive('giveFocus', function() { + return function(scope, element, attrs) { + element.click(function(e) { + e.stopPropagation(); + }); + + scope.$watch(attrs.giveFocus, function (newValue) { + if (!newValue) { + return; + } + setTimeout(function() { + element.focus(); + var domEl = element[0]; + if (domEl.setSelectionRange) { + var pos = element.val().length * 2; + domEl.setSelectionRange(pos, pos); + } + }, 200); + }, true); + }; +}); + +export default {}; diff --git a/public/app/core/directives/grafana_version_check.js b/public/app/core/directives/grafana_version_check.js new file mode 100644 index 0000000000000..ad50b330587cc --- /dev/null +++ b/public/app/core/directives/grafana_version_check.js @@ -0,0 +1,31 @@ +define([ + '../core_module', +], +function (coreModule) { + 'use strict'; + + coreModule.directive('grafanaVersionCheck', function($http, contextSrv) { + return { + restrict: 'A', + link: function(scope, elem) { + if (contextSrv.version === 'master') { + return; + } + + $http({ method: 'GET', url: 'https://grafanarel.s3.amazonaws.com/latest.json' }) + .then(function(response) { + if (!response.data || !response.data.version) { + return; + } + + if (contextSrv.version !== response.data.version) { + elem.append(' ' + + ' ' + + 'New version available: ' + response.data.version + + ''); + } + }); + } + }; + }); +}); diff --git a/public/app/core/directives/metric_segment.js b/public/app/core/directives/metric_segment.js new file mode 100644 index 0000000000000..1d07544bb2dc7 --- /dev/null +++ b/public/app/core/directives/metric_segment.js @@ -0,0 +1,218 @@ +define([ + 'lodash', + 'jquery', + '../core_module', +], +function (_, $, coreModule) { + 'use strict'; + + coreModule.directive('metricSegment', function($compile, $sce) { + var inputTemplate = ''; + + var buttonTemplate = ''; + + return { + scope: { + segment: "=", + getOptions: "&", + onChange: "&", + }, + link: function($scope, elem) { + var $input = $(inputTemplate); + var $button = $(buttonTemplate); + var segment = $scope.segment; + var options = null; + var cancelBlur = null; + var linkMode = true; + + $input.appendTo(elem); + $button.appendTo(elem); + + $scope.updateVariableValue = function(value) { + if (value === '' || segment.value === value) { + return; + } + + $scope.$apply(function() { + var selected = _.findWhere($scope.altSegments, { value: value }); + if (selected) { + segment.value = selected.value; + segment.html = selected.html; + segment.fake = false; + segment.expandable = selected.expandable; + } + else 
if (segment.custom !== 'false') { + segment.value = value; + segment.html = $sce.trustAsHtml(value); + segment.expandable = true; + segment.fake = false; + } + + $scope.onChange(); + }); + }; + + $scope.switchToLink = function() { + if (linkMode) { return; } + + clearTimeout(cancelBlur); + cancelBlur = null; + linkMode = true; + $input.hide(); + $button.show(); + $scope.updateVariableValue($input.val()); + }; + + $scope.inputBlur = function() { + // happens long before the click event on the typeahead options + // need to have long delay because the blur + cancelBlur = setTimeout($scope.switchToLink, 100); + }; + + $scope.source = function(query, callback) { + if (options) { return options; } + + $scope.$apply(function() { + $scope.getOptions().then(function(altSegments) { + $scope.altSegments = altSegments; + options = _.map($scope.altSegments, function(alt) { return alt.value; }); + + // add custom values + if (segment.custom !== 'false') { + if (!segment.fake && _.indexOf(options, segment.value) === -1) { + options.unshift(segment.value); + } + } + + callback(options); + }); + }); + }; + + $scope.updater = function(value) { + if (value === segment.value) { + clearTimeout(cancelBlur); + $input.focus(); + return value; + } + + $input.val(value); + $scope.switchToLink(); + + return value; + }; + + $scope.matcher = function(item) { + var str = this.query; + if (str[0] === '/') { str = str.substring(1); } + if (str[str.length - 1] === '/') { str = str.substring(0, str.length-1); } + try { + return item.toLowerCase().match(str); + } catch(e) { + return false; + } + }; + + $input.attr('data-provide', 'typeahead'); + $input.typeahead({ source: $scope.source, minLength: 0, items: 10000, updater: $scope.updater, matcher: $scope.matcher }); + + var typeahead = $input.data('typeahead'); + typeahead.lookup = function () { + this.query = this.$element.val() || ''; + var items = this.source(this.query, $.proxy(this.process, this)); + return items ? this.process(items) : items; + }; + + $button.keydown(function(evt) { + // trigger typeahead on down arrow or enter key + if (evt.keyCode === 40 || evt.keyCode === 13) { + $button.click(); + } + }); + + $button.click(function() { + options = null; + $input.css('width', ($button.width() + 16) + 'px'); + + $button.hide(); + $input.show(); + $input.focus(); + + linkMode = false; + + var typeahead = $input.data('typeahead'); + if (typeahead) { + $input.val(''); + typeahead.lookup(); + } + }); + + $input.blur($scope.inputBlur); + + $compile(elem.contents())($scope); + } + }; + }); + + coreModule.directive('metricSegmentModel', function(uiSegmentSrv, $q) { + return { + template: '', + restrict: 'E', + scope: { + property: "=", + options: "=", + getOptions: "&", + onChange: "&", + }, + link: { + pre: function postLink($scope, elem, attrs) { + + $scope.valueToSegment = function(value) { + var option = _.findWhere($scope.options, {value: value}); + var segment = { + cssClass: attrs.cssClass, + custom: attrs.custom, + value: option ? 
option.text : value, + }; + return uiSegmentSrv.newSegment(segment); + }; + + $scope.getOptionsInternal = function() { + if ($scope.options) { + var optionSegments = _.map($scope.options, function(option) { + return uiSegmentSrv.newSegment({value: option.text}); + }); + return $q.when(optionSegments); + } else { + return $scope.getOptions(); + } + }; + + $scope.onSegmentChange = function() { + if ($scope.options) { + var option = _.findWhere($scope.options, {text: $scope.segment.value}); + if (option && option.value !== $scope.property) { + $scope.property = option.value; + } else if (attrs.custom !== 'false') { + $scope.property = $scope.segment.value; + } + } else { + $scope.property = $scope.segment.value; + } + + // needs to call this after digest so + // property is synced with outerscope + $scope.$$postDigest(function() { + $scope.onChange(); + }); + }; + + $scope.segment = $scope.valueToSegment($scope.property); + } + } + }; + }); +}); diff --git a/public/app/core/directives/misc.js b/public/app/core/directives/misc.js new file mode 100644 index 0000000000000..0fe18f922f7aa --- /dev/null +++ b/public/app/core/directives/misc.js @@ -0,0 +1,122 @@ +define([ + 'angular', + '../core_module', + 'app/core/utils/kbn', +], +function (angular, coreModule, kbn) { + 'use strict'; + + coreModule.directive('tip', function($compile) { + return { + restrict: 'E', + link: function(scope, elem, attrs) { + var _t = ''; + _t = _t.replace(/{/g, '\\{').replace(/}/g, '\\}'); + elem.replaceWith($compile(angular.element(_t))(scope)); + } + }; + }); + + coreModule.directive('watchChange', function() { + return { + scope: { onchange: '&watchChange' }, + link: function(scope, element) { + element.on('input', function() { + scope.$apply(function () { + scope.onchange({ inputValue: element.val() }); + }); + }); + } + }; + }); + + coreModule.directive('editorOptBool', function($compile) { + return { + restrict: 'E', + link: function(scope, elem, attrs) { + var ngchange = attrs.change ? (' ng-change="' + attrs.change + '"') : ''; + var tip = attrs.tip ? (' ' + attrs.tip + '') : ''; + var showIf = attrs.showIf ? (' ng-show="' + attrs.showIf + '" ') : ''; + + var template = '
    ' + + ' ' + + '' + + ' '; + elem.replaceWith($compile(angular.element(template))(scope)); + } + }; + }); + + coreModule.directive('editorCheckbox', function($compile, $interpolate) { + return { + restrict: 'E', + link: function(scope, elem, attrs) { + var text = $interpolate(attrs.text)(scope); + var model = $interpolate(attrs.model)(scope); + var ngchange = attrs.change ? (' ng-change="' + attrs.change + '"') : ''; + var tip = attrs.tip ? (' ' + attrs.tip + '') : ''; + var label = ''; + + var template = '' + + ' '; + + template = label + template; + elem.replaceWith($compile(angular.element(template))(scope)); + } + }; + }); + + coreModule.directive('gfDropdown', function ($parse, $compile, $timeout) { + function buildTemplate(items, placement) { + var upclass = placement === 'top' ? 'dropup' : ''; + var ul = [ + '' + ]; + + angular.forEach(items, function (item, index) { + if (item.divider) { + return ul.splice(index + 1, 0, '
  • '); + } + + var li = '' + + '' + (item.text || '') + ''; + + if (item.submenu && item.submenu.length) { + li += buildTemplate(item.submenu).join('\n'); + } + + li += ''; + ul.splice(index + 1, 0, li); + }); + return ul; + } + + return { + restrict: 'EA', + scope: true, + link: function postLink(scope, iElement, iAttrs) { + var getter = $parse(iAttrs.gfDropdown), items = getter(scope); + $timeout(function () { + var placement = iElement.data('placement'); + var dropdown = angular.element(buildTemplate(items, placement).join('')); + dropdown.insertAfter(iElement); + $compile(iElement.next('ul.dropdown-menu'))(scope); + }); + + iElement.addClass('dropdown-toggle').attr('data-toggle', 'dropdown'); + } + }; + }); + +}); diff --git a/public/app/core/directives/ng_model_on_blur.js b/public/app/core/directives/ng_model_on_blur.js new file mode 100644 index 0000000000000..c4a645732c57d --- /dev/null +++ b/public/app/core/directives/ng_model_on_blur.js @@ -0,0 +1,56 @@ +define([ + '../core_module', + 'app/core/utils/kbn', + 'app/core/utils/rangeutil', +], +function (coreModule, kbn, rangeUtil) { + 'use strict'; + + coreModule.directive('ngModelOnblur', function() { + return { + restrict: 'A', + priority: 1, + require: 'ngModel', + link: function(scope, elm, attr, ngModelCtrl) { + if (attr.type === 'radio' || attr.type === 'checkbox') { + return; + } + + elm.off('input keydown change'); + elm.bind('blur', function() { + scope.$apply(function() { + ngModelCtrl.$setViewValue(elm.val()); + }); + }); + } + }; + }); + + coreModule.directive('emptyToNull', function () { + return { + restrict: 'A', + require: 'ngModel', + link: function (scope, elm, attrs, ctrl) { + ctrl.$parsers.push(function (viewValue) { + if(viewValue === "") { return null; } + return viewValue; + }); + } + }; + }); + + coreModule.directive('validTimeSpan', function() { + return { + require: 'ngModel', + link: function(scope, elm, attrs, ctrl) { + ctrl.$validators.integer = function(modelValue, viewValue) { + if (ctrl.$isEmpty(modelValue)) { + return true; + } + var info = rangeUtil.describeTextRange(viewValue); + return info.invalid !== true; + }; + } + }; + }); +}); diff --git a/public/app/core/directives/password_strenght.js b/public/app/core/directives/password_strenght.js new file mode 100644 index 0000000000000..b53113f936556 --- /dev/null +++ b/public/app/core/directives/password_strenght.js @@ -0,0 +1,45 @@ +define([ + '../core_module', +], +function (coreModule) { + 'use strict'; + + coreModule.directive('passwordStrength', function() { + var template = '
    ' + + '{{strengthText}}' + + '
    '; + return { + template: template, + scope: { + password: "=", + }, + link: function($scope) { + + $scope.strengthClass = ''; + + function passwordChanged(newValue) { + if (!newValue) { + $scope.strengthText = ""; + $scope.strengthClass = "hidden"; + return; + } + if (newValue.length < 4) { + $scope.strengthText = "strength: weak sauce."; + $scope.strengthClass = "password-strength-bad"; + return; + } + if (newValue.length <= 8) { + $scope.strengthText = "strength: you can do better."; + $scope.strengthClass = "password-strength-ok"; + return; + } + + $scope.strengthText = "strength: strong like a bull."; + $scope.strengthClass = "password-strength-good"; + } + + $scope.$watch("password", passwordChanged); + } + }; + }); +}); diff --git a/public/app/core/directives/spectrum_picker.js b/public/app/core/directives/spectrum_picker.js new file mode 100644 index 0000000000000..92fc3982d7adf --- /dev/null +++ b/public/app/core/directives/spectrum_picker.js @@ -0,0 +1,41 @@ +define([ + 'angular', + '../core_module', + 'spectrum', +], +function (angular, coreModule) { + 'use strict'; + + coreModule.directive('spectrumPicker', function() { + return { + restrict: 'E', + require: 'ngModel', + scope: false, + replace: true, + template: "", + link: function(scope, element, attrs, ngModel) { + var input = element.find('input'); + var options = angular.extend({ + showAlpha: true, + showButtons: false, + color: ngModel.$viewValue, + change: function(color) { + scope.$apply(function() { + ngModel.$setViewValue(color.toRgbString()); + }); + } + }, scope.$eval(attrs.options)); + + ngModel.$render = function() { + input.spectrum('set', ngModel.$viewValue || ''); + }; + + input.spectrum(options); + + scope.$on('$destroy', function() { + input.spectrum('destroy'); + }); + } + }; + }); +}); diff --git a/public/app/directives/tags.js b/public/app/core/directives/tags.js similarity index 94% rename from public/app/directives/tags.js rename to public/app/core/directives/tags.js index f408a5e38642e..01f4c4f3c3755 100644 --- a/public/app/directives/tags.js +++ b/public/app/core/directives/tags.js @@ -1,9 +1,10 @@ define([ 'angular', 'jquery', - 'bootstrap-tagsinput' + '../core_module', + 'bootstrap-tagsinput', ], -function (angular, $) { +function (angular, $, coreModule) { 'use strict'; function djb2(str) { @@ -38,9 +39,7 @@ function (angular, $) { element.css("border-color", borderColor); } - angular - .module('grafana.directives') - .directive('tagColorFromName', function() { + coreModule.directive('tagColorFromName', function() { return { scope: { tagColorFromName: "=" }, link: function (scope, element) { @@ -49,9 +48,7 @@ function (angular, $) { }; }); - angular - .module('grafana.directives') - .directive('bootstrapTagsinput', function() { + coreModule.directive('bootstrapTagsinput', function() { function getItemProperty(scope, property) { if (!property) { diff --git a/public/app/core/directives/topnav.js b/public/app/core/directives/topnav.js new file mode 100644 index 0000000000000..38aa9a9e6b3a9 --- /dev/null +++ b/public/app/core/directives/topnav.js @@ -0,0 +1,50 @@ +define([ + '../core_module', +], +function (coreModule) { + 'use strict'; + + coreModule.directive('topnav', function($rootScope, contextSrv) { + return { + restrict: 'E', + transclude: true, + scope: { + title: "@", + section: "@", + titleAction: "&", + subnav: "=", + }, + template: + '', + link: function(scope, elem, attrs) { + scope.icon = attrs.icon; + scope.contextSrv = contextSrv; + + scope.toggle = function() { + 
$rootScope.appEvent('toggle-sidemenu'); + }; + } + }; + }); + +}); diff --git a/public/app/core/directives/value_select_dropdown.js b/public/app/core/directives/value_select_dropdown.js new file mode 100644 index 0000000000000..4f5076abd2f21 --- /dev/null +++ b/public/app/core/directives/value_select_dropdown.js @@ -0,0 +1,284 @@ +define([ + 'angular', + 'lodash', + '../core_module', +], +function (angular, _, coreModule) { + 'use strict'; + + coreModule.controller('ValueSelectDropdownCtrl', function($q) { + var vm = this; + + vm.show = function() { + vm.oldVariableText = vm.variable.current.text; + vm.highlightIndex = -1; + + vm.options = vm.variable.options; + vm.selectedValues = _.filter(vm.options, {selected: true}); + + vm.tags = _.map(vm.variable.tags, function(value) { + var tag = { text: value, selected: false }; + _.each(vm.variable.current.tags, function(tagObj) { + if (tagObj.text === value) { + tag = tagObj; + } + }); + return tag; + }); + + vm.search = { + query: '', + options: vm.options.slice(0, Math.min(vm.options.length, 1000)) + }; + + vm.dropdownVisible = true; + }; + + vm.updateLinkText = function() { + var current = vm.variable.current; + + if (current.tags && current.tags.length) { + // filer out values that are in selected tags + var selectedAndNotInTag = _.filter(vm.variable.options, function(option) { + if (!option.selected) { return false; } + for (var i = 0; i < current.tags.length; i++) { + var tag = current.tags[i]; + if (_.indexOf(tag.values, option.value) !== -1) { + return false; + } + } + return true; + }); + + // convert values to text + var currentTexts = _.pluck(selectedAndNotInTag, 'text'); + + // join texts + vm.linkText = currentTexts.join(' + '); + if (vm.linkText.length > 0) { + vm.linkText += ' + '; + } + } else { + vm.linkText = vm.variable.current.text; + } + }; + + vm.clearSelections = function() { + _.each(vm.options, function(option) { + option.selected = false; + }); + + vm.selectionsChanged(false); + }; + + vm.selectTag = function(tag) { + tag.selected = !tag.selected; + var tagValuesPromise; + if (!tag.values) { + tagValuesPromise = vm.getValuesForTag({tagKey: tag.text}); + } else { + tagValuesPromise = $q.when(tag.values); + } + + tagValuesPromise.then(function(values) { + tag.values = values; + tag.valuesText = values.join(' + '); + _.each(vm.options, function(option) { + if (_.indexOf(tag.values, option.value) !== -1) { + option.selected = tag.selected; + } + }); + + vm.selectionsChanged(false); + }); + }; + + vm.keyDown = function (evt) { + if (evt.keyCode === 27) { + vm.hide(); + } + if (evt.keyCode === 40) { + vm.moveHighlight(1); + } + if (evt.keyCode === 38) { + vm.moveHighlight(-1); + } + if (evt.keyCode === 13) { + if (vm.search.options.length === 0) { + vm.commitChanges(); + } else { + vm.selectValue(vm.search.options[vm.highlightIndex], {}, true, false); + } + } + if (evt.keyCode === 32) { + vm.selectValue(vm.search.options[vm.highlightIndex], {}, false, false); + } + }; + + vm.moveHighlight = function(direction) { + vm.highlightIndex = (vm.highlightIndex + direction) % vm.search.options.length; + }; + + vm.selectValue = function(option, event, commitChange, excludeOthers) { + if (!option) { return; } + + option.selected = !option.selected; + + commitChange = commitChange || false; + excludeOthers = excludeOthers || false; + + var setAllExceptCurrentTo = function(newValue) { + _.each(vm.options, function(other) { + if (option !== other) { other.selected = newValue; } + }); + }; + + // commit action (enter key), should not 
deselect it + if (commitChange) { + option.selected = true; + } + + if (option.text === 'All' || excludeOthers) { + setAllExceptCurrentTo(false); + commitChange = true; + } + else if (!vm.variable.multi) { + setAllExceptCurrentTo(false); + commitChange = true; + } else if (event.ctrlKey || event.metaKey || event.shiftKey) { + commitChange = true; + setAllExceptCurrentTo(false); + } + + vm.selectionsChanged(commitChange); + }; + + vm.selectionsChanged = function(commitChange) { + vm.selectedValues = _.filter(vm.options, {selected: true}); + + if (vm.selectedValues.length > 1 && vm.selectedValues.length !== vm.options.length) { + if (vm.selectedValues[0].text === 'All') { + vm.selectedValues[0].selected = false; + vm.selectedValues = vm.selectedValues.slice(1, vm.selectedValues.length); + } + } + + // validate selected tags + _.each(vm.tags, function(tag) { + if (tag.selected) { + _.each(tag.values, function(value) { + if (!_.findWhere(vm.selectedValues, {value: value})) { + tag.selected = false; + } + }); + } + }); + + vm.selectedTags = _.filter(vm.tags, {selected: true}); + vm.variable.current.value = _.pluck(vm.selectedValues, 'value'); + vm.variable.current.text = _.pluck(vm.selectedValues, 'text').join(' + '); + vm.variable.current.tags = vm.selectedTags; + + // only single value + if (vm.selectedValues.length === 1) { + vm.variable.current.value = vm.selectedValues[0].value; + } + + if (commitChange) { + vm.commitChanges(); + } + }; + + vm.commitChanges = function() { + // if we have a search query and no options use that + if (vm.search.options.length === 0 && vm.search.query.length > 0) { + vm.variable.current = {text: vm.search.query, value: vm.search.query}; + } + else if (vm.selectedValues.length === 0) { + // make sure one option is selected + vm.options[0].selected = true; + vm.selectionsChanged(false); + } + + vm.dropdownVisible = false; + vm.updateLinkText(); + + if (vm.variable.current.text !== vm.oldVariableText) { + vm.onUpdated(); + } + }; + + vm.queryChanged = function() { + vm.highlightIndex = -1; + vm.search.options = _.filter(vm.options, function(option) { + return option.text.toLowerCase().indexOf(vm.search.query.toLowerCase()) !== -1; + }); + + vm.search.options = vm.search.options.slice(0, Math.min(vm.search.options.length, 1000)); + }; + + vm.init = function() { + vm.selectedTags = vm.variable.current.tags || []; + vm.updateLinkText(); + }; + + }); + + coreModule.directive('valueSelectDropdown', function($compile, $window, $timeout, $rootScope) { + return { + scope: { variable: "=", onUpdated: "&", getValuesForTag: "&" }, + templateUrl: 'app/partials/valueSelectDropdown.html', + controller: 'ValueSelectDropdownCtrl', + controllerAs: 'vm', + bindToController: true, + link: function(scope, elem) { + var bodyEl = angular.element($window.document.body); + var linkEl = elem.find('.variable-value-link'); + var inputEl = elem.find('input'); + + function openDropdown() { + inputEl.css('width', Math.max(linkEl.width(), 30) + 'px'); + + inputEl.show(); + linkEl.hide(); + + inputEl.focus(); + $timeout(function() { bodyEl.on('click', bodyOnClick); }, 0, false); + } + + function switchToLink() { + inputEl.hide(); + linkEl.show(); + bodyEl.off('click', bodyOnClick); + } + + function bodyOnClick (e) { + if (elem.has(e.target).length === 0) { + scope.$apply(function() { + scope.vm.commitChanges(); + }); + } + } + + scope.$watch('vm.dropdownVisible', function(newValue) { + if (newValue) { + openDropdown(); + } else { + switchToLink(); + } + }); + + var cleanUp = 
$rootScope.$on('template-variable-value-updated', function() { + scope.vm.updateLinkText(); + }); + + scope.$on("$destroy", function() { + cleanUp(); + }); + + scope.vm.init(); + }, + }; + }); + +}); diff --git a/public/app/core/filters/filters.ts b/public/app/core/filters/filters.ts new file mode 100644 index 0000000000000..7ebba90368660 --- /dev/null +++ b/public/app/core/filters/filters.ts @@ -0,0 +1,74 @@ +/// + +import angular = require('angular'); +import jquery = require('jquery'); +import moment = require('moment'); +import _ = require('lodash'); +import coreModule = require('../core_module'); + +coreModule.filter('stringSort', function() { + return function(input) { + return input.sort(); + }; +}); + +coreModule.filter('slice', function() { + return function(arr, start, end) { + if (!_.isUndefined(arr)) { + return arr.slice(start, end); + } + }; +}); + +coreModule.filter('stringify', function() { + return function(arr) { + if (_.isObject(arr) && !_.isArray(arr)) { + return angular.toJson(arr); + } else { + return _.isNull(arr) ? null : arr.toString(); + } + }; +}); + +coreModule.filter('moment', function() { + return function(date, mode) { + switch (mode) { + case 'ago': + return moment(date).fromNow(); + } + return moment(date).fromNow(); + }; +}); + +coreModule.filter('noXml', function() { + var noXml = function(text) { + return _.isString(text) + ? text + .replace(/&/g, '&') + .replace(//g, '>') + .replace(/'/g, ''') + .replace(/"/g, '"') + : text; + }; + return function(text) { + return _.isArray(text) + ? _.map(text, noXml) + : noXml(text); + }; +}); + +coreModule.filter('interpolateTemplateVars', function (templateSrv) { + var filterFunc : any = function (text, scope) { + if (scope.panel) { + return templateSrv.replaceWithText(text, scope.panel.scopedVars); + } else { + return templateSrv.replaceWithText(text, scope.row.scopedVars); + } + }; + + filterFunc.$stateful = true; + return filterFunc; +}); + +export {}; diff --git a/public/app/components/extend-jquery.js b/public/app/core/jquery_extended.js similarity index 97% rename from public/app/components/extend-jquery.js rename to public/app/core/jquery_extended.js index f44245103b58e..449afcf2019c0 100644 --- a/public/app/components/extend-jquery.js +++ b/public/app/core/jquery_extended.js @@ -2,9 +2,6 @@ define(['jquery', 'angular', 'lodash'], function ($, angular, _) { 'use strict'; - /** - * jQuery extensions - */ var $win = $(window); $.fn.place_tt = (function () { diff --git a/public/app/components/lodash.extended.js b/public/app/core/lodash_extended.js similarity index 100% rename from public/app/components/lodash.extended.js rename to public/app/core/lodash_extended.js diff --git a/public/app/components/partials.js b/public/app/core/partials.js similarity index 100% rename from public/app/components/partials.js rename to public/app/core/partials.js diff --git a/public/app/routes/all.js b/public/app/core/routes/all.js similarity index 75% rename from public/app/routes/all.js rename to public/app/core/routes/all.js index 026eccc1d4540..a7e36a0e228cb 100644 --- a/public/app/routes/all.js +++ b/public/app/core/routes/all.js @@ -1,14 +1,16 @@ define([ 'angular', - './dashLoadControllers', -], function(angular) { + '../core_module', + './bundle_loader', + './dashboard_loaders', +], function(angular, coreModule, BundleLoader) { "use strict"; - var module = angular.module('grafana.routes'); - - module.config(function($routeProvider, $locationProvider) { + coreModule.config(function($routeProvider, $locationProvider) { 
$locationProvider.html5Mode(true); + var loadOrgBundle = new BundleLoader.BundleLoader('app/features/org/all'); + $routeProvider .when('/', { templateUrl: 'app/partials/dashboard.html', @@ -41,30 +43,37 @@ define([ .when('/datasources', { templateUrl: 'app/features/org/partials/datasources.html', controller : 'DataSourcesCtrl', + resolve: loadOrgBundle, }) .when('/datasources/edit/:id', { templateUrl: 'app/features/org/partials/datasourceEdit.html', controller : 'DataSourceEditCtrl', + resolve: loadOrgBundle, }) .when('/datasources/new', { templateUrl: 'app/features/org/partials/datasourceEdit.html', controller : 'DataSourceEditCtrl', + resolve: loadOrgBundle, }) .when('/org', { templateUrl: 'app/features/org/partials/orgDetails.html', controller : 'OrgDetailsCtrl', + resolve: loadOrgBundle, }) .when('/org/new', { templateUrl: 'app/features/org/partials/newOrg.html', controller : 'NewOrgCtrl', + resolve: loadOrgBundle, }) .when('/org/users', { templateUrl: 'app/features/org/partials/orgUsers.html', controller : 'OrgUsersCtrl', + resolve: loadOrgBundle, }) .when('/org/apikeys', { templateUrl: 'app/features/org/partials/orgApiKeys.html', controller : 'OrgApiKeysCtrl', + resolve: loadOrgBundle, }) .when('/profile', { templateUrl: 'app/features/profile/partials/profile.html', @@ -74,13 +83,17 @@ define([ templateUrl: 'app/features/profile/partials/password.html', controller : 'ChangePasswordCtrl', }) + .when('/profile/select-org', { + templateUrl: 'app/features/profile/partials/select_org.html', + controller : 'SelectOrgCtrl', + }) .when('/admin/settings', { templateUrl: 'app/features/admin/partials/settings.html', controller : 'AdminSettingsCtrl', }) .when('/admin/users', { templateUrl: 'app/features/admin/partials/users.html', - controller : 'AdminUsersCtrl', + controller : 'AdminListUsersCtrl', }) .when('/admin/users/create', { templateUrl: 'app/features/admin/partials/new_user.html', @@ -92,11 +105,24 @@ define([ }) .when('/admin/orgs', { templateUrl: 'app/features/admin/partials/orgs.html', + controller : 'AdminListOrgsCtrl', + }) + .when('/admin/orgs/edit/:id', { + templateUrl: 'app/features/admin/partials/edit_org.html', + controller : 'AdminEditOrgCtrl', }) .when('/login', { templateUrl: 'app/partials/login.html', controller : 'LoginCtrl', }) + .when('/invite/:code', { + templateUrl: 'app/partials/signup_invited.html', + controller : 'InvitedCtrl', + }) + .when('/signup', { + templateUrl: 'app/partials/signup_step2.html', + controller : 'SignUpCtrl', + }) .when('/user/password/send-reset-email', { templateUrl: 'app/partials/reset_password.html', controller : 'ResetPasswordCtrl', diff --git a/public/app/core/routes/bundle_loader.ts b/public/app/core/routes/bundle_loader.ts new file mode 100644 index 0000000000000..6f132d65619d0 --- /dev/null +++ b/public/app/core/routes/bundle_loader.ts @@ -0,0 +1,23 @@ +/// + +export class BundleLoader { + lazy: any; + loadingDefer: any; + + constructor(bundleName) { + this.lazy = ["$q", "$route", "$rootScope", ($q, $route, $rootScope) => { + if (this.loadingDefer) { + return this.loadingDefer.promise; + } + + this.loadingDefer = $q.defer(); + + require([bundleName], () => { + this.loadingDefer.resolve(); + }); + + return this.loadingDefer.promise; + }]; + + } +} diff --git a/public/app/routes/dashLoadControllers.js b/public/app/core/routes/dashboard_loaders.js similarity index 67% rename from public/app/routes/dashLoadControllers.js rename to public/app/core/routes/dashboard_loaders.js index 9741824dbce05..bcc284e882cec 100644 --- 
a/public/app/routes/dashLoadControllers.js +++ b/public/app/core/routes/dashboard_loaders.js @@ -1,21 +1,15 @@ define([ - 'angular', - 'lodash', - 'kbn', - 'moment', - 'jquery', + '../core_module', ], -function (angular) { +function (coreModule) { "use strict"; - var module = angular.module('grafana.routes'); - - module.controller('LoadDashboardCtrl', function($scope, $routeParams, dashboardLoaderSrv, backendSrv) { + coreModule.controller('LoadDashboardCtrl', function($scope, $routeParams, dashboardLoaderSrv, backendSrv) { if (!$routeParams.slug) { backendSrv.get('/api/dashboards/home').then(function(result) { var meta = result.meta; - meta.canSave = meta.canShare = meta.canEdit = meta.canStar = false; + meta.canSave = meta.canShare = meta.canStar = false; $scope.initDashboard(result, $scope); }); return; @@ -27,7 +21,7 @@ function (angular) { }); - module.controller('DashFromImportCtrl', function($scope, $location, alertSrv) { + coreModule.controller('DashFromImportCtrl', function($scope, $location, alertSrv) { if (!window.grafanaImportDashboard) { alertSrv.set('Not found', 'Cannot reload page with unsaved imported dashboard', 'warning', 7000); $location.path(''); @@ -39,7 +33,7 @@ function (angular) { }, $scope); }); - module.controller('NewDashboardCtrl', function($scope) { + coreModule.controller('NewDashboardCtrl', function($scope) { $scope.initDashboard({ meta: { canStar: false, canShare: false }, dashboard: { diff --git a/public/app/services/alertSrv.js b/public/app/core/services/alert_srv.js similarity index 87% rename from public/app/services/alertSrv.js rename to public/app/core/services/alert_srv.js index 4a8ef27352365..f3b5329cd60c5 100644 --- a/public/app/services/alertSrv.js +++ b/public/app/core/services/alert_srv.js @@ -1,26 +1,25 @@ define([ 'angular', - 'lodash' + 'lodash', + '../core_module', ], -function (angular, _) { +function (angular, _, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('alertSrv', function($timeout, $sce, $rootScope, $modal, $q) { + coreModule.service('alertSrv', function($timeout, $sce, $rootScope, $modal, $q) { var self = this; this.init = function() { $rootScope.onAppEvent('alert-error', function(e, alert) { self.set(alert[0], alert[1], 'error'); - }); + }, $rootScope); $rootScope.onAppEvent('alert-warning', function(e, alert) { self.set(alert[0], alert[1], 'warning', 5000); - }); + }, $rootScope); $rootScope.onAppEvent('alert-success', function(e, alert) { self.set(alert[0], alert[1], 'success', 3000); - }); - $rootScope.onAppEvent('confirm-modal', this.showConfirmModal); + }, $rootScope); + $rootScope.onAppEvent('confirm-modal', this.showConfirmModal, $rootScope); }; // List of all alert objects @@ -71,7 +70,7 @@ function (angular, _) { var confirmModal = $modal({ template: './app/partials/confirm_modal.html', persist: false, - modalClass: 'confirm-modal', + modalClass: 'modal-no-header confirm-modal', show: false, scope: scope, keyboard: false diff --git a/public/app/core/services/all.js b/public/app/core/services/all.js new file mode 100644 index 0000000000000..2d4415a8fa2b8 --- /dev/null +++ b/public/app/core/services/all.js @@ -0,0 +1,13 @@ +define([ + './alert_srv', + './util_srv', + './datasource_srv', + './context_srv', + './timer', + './keyboard_manager', + './analytics', + './popover_srv', + './segment_srv', + './backend_srv', +], +function () {}); diff --git a/public/app/services/analytics.js b/public/app/core/services/analytics.js similarity index 75% rename from 
public/app/services/analytics.js rename to public/app/core/services/analytics.js index 4bb7f0c79dbe6..e09ee6c9b77b8 100644 --- a/public/app/services/analytics.js +++ b/public/app/core/services/analytics.js @@ -1,12 +1,11 @@ define([ 'angular', + '../core_module', ], -function(angular) { +function(angular, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - module.service('googleAnalyticsSrv', function($rootScope, $location) { - + coreModule.service('googleAnalyticsSrv', function($rootScope, $location) { var first = true; this.init = function() { diff --git a/public/app/services/backendSrv.js b/public/app/core/services/backend_srv.js similarity index 89% rename from public/app/services/backendSrv.js rename to public/app/core/services/backend_srv.js index 8d836b65d5ea8..5d6ce8e0fe429 100644 --- a/public/app/services/backendSrv.js +++ b/public/app/core/services/backend_srv.js @@ -1,14 +1,13 @@ define([ 'angular', 'lodash', - 'config', + '../core_module', + 'app/core/config', ], -function (angular, _, config) { +function (angular, _, coreModule, config) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('backendSrv', function($http, alertSrv, $timeout) { + coreModule.service('backendSrv', function($http, alertSrv, $timeout) { var self = this; this.get = function(url, params) { @@ -37,17 +36,16 @@ function (angular, _, config) { return; } - if (err.status === 422) { - alertSrv.set("Validation failed", "", "warning", 4000); - throw err.data; - } - var data = err.data || { message: 'Unexpected error' }; - if (_.isString(data)) { data = { message: data }; } + if (err.status === 422) { + alertSrv.set("Validation failed", data.message, "warning", 4000); + throw data; + } + data.severity = 'error'; if (err.status < 500) { @@ -107,6 +105,11 @@ function (angular, _, config) { }); } + // for Prometheus + if (!err.data.message && _.isString(err.data.error)) { + err.data.message = err.data.error; + } + throw err; }); }; diff --git a/public/app/services/contextSrv.js b/public/app/core/services/context_srv.js similarity index 89% rename from public/app/services/contextSrv.js rename to public/app/core/services/context_srv.js index b3f8a1ed164d6..77f10fdf16ad4 100644 --- a/public/app/services/contextSrv.js +++ b/public/app/core/services/context_srv.js @@ -1,15 +1,14 @@ define([ 'angular', 'lodash', - 'store', - 'config', + '../core_module', + 'app/core/store', + 'app/core/config', ], -function (angular, _, store, config) { +function (angular, _, coreModule, store, config) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('contextSrv', function($rootScope, $timeout) { + coreModule.service('contextSrv', function($rootScope, $timeout) { var self = this; function User() { diff --git a/public/app/services/datasourceSrv.js b/public/app/core/services/datasource_srv.js similarity index 80% rename from public/app/services/datasourceSrv.js rename to public/app/core/services/datasource_srv.js index 35256c9fe4f51..9709bb88cdf76 100644 --- a/public/app/services/datasourceSrv.js +++ b/public/app/core/services/datasource_srv.js @@ -1,14 +1,13 @@ define([ 'angular', 'lodash', - 'config', + '../core_module', + 'app/core/config', ], -function (angular, _, config) { +function (angular, _, coreModule, config) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('datasourceSrv', function($q, $injector, $rootScope) { + coreModule.service('datasourceSrv', function($q, $injector, $rootScope) { var self 
= this; this.init = function() { @@ -20,13 +19,24 @@ function (angular, _, config) { if (value.meta && value.meta.metrics) { self.metricSources.push({ value: key === config.defaultDatasource ? null : key, - name: key + name: key, + meta: value.meta, }); } if (value.meta && value.meta.annotations) { self.annotationSources.push(value); } }); + + this.metricSources.sort(function(a, b) { + if (a.meta.builtIn || a.name > b.name) { + return 1; + } + if (a.name < b.name) { + return -1; + } + return 0; + }); }; this.get = function(name) { diff --git a/public/app/services/keyboardManager.js b/public/app/core/services/keyboard_manager.js similarity index 97% rename from public/app/services/keyboardManager.js rename to public/app/core/services/keyboard_manager.js index f45ddad305b4b..14b6546528157 100644 --- a/public/app/services/keyboardManager.js +++ b/public/app/core/services/keyboard_manager.js @@ -1,15 +1,14 @@ define([ 'angular', - 'lodash' + 'lodash', + '../core_module', ], -function (angular, _) { +function (angular, _, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - // This service was based on OpenJS library available in BSD License // http://www.openjs.com/scripts/events/keyboard_shortcuts/index.php - module.factory('keyboardManager', ['$window', '$timeout', function ($window, $timeout) { + coreModule.factory('keyboardManager', ['$window', '$timeout', function ($window, $timeout) { var keyboardManagerService = {}; var defaultOpt = { diff --git a/public/app/services/popoverSrv.js b/public/app/core/services/popover_srv.js similarity index 88% rename from public/app/services/popoverSrv.js rename to public/app/core/services/popover_srv.js index cec294178c096..26a935bf2836c 100644 --- a/public/app/services/popoverSrv.js +++ b/public/app/core/services/popover_srv.js @@ -2,13 +2,12 @@ define([ 'angular', 'lodash', 'jquery', + '../core_module', ], -function (angular, _, $) { +function (angular, _, $, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('popoverSrv', function($templateCache, $timeout, $q, $http, $compile) { + coreModule.service('popoverSrv', function($templateCache, $timeout, $q, $http, $compile) { this.getTemplate = function(url) { return $q.when($templateCache.get(url) || $http.get(url, {cache: true})); diff --git a/public/app/core/services/segment_srv.js b/public/app/core/services/segment_srv.js new file mode 100644 index 0000000000000..836437a6dc5df --- /dev/null +++ b/public/app/core/services/segment_srv.js @@ -0,0 +1,110 @@ +define([ + 'angular', + 'lodash', + '../core_module', +], +function (angular, _, coreModule) { + 'use strict'; + + coreModule.service('uiSegmentSrv', function($sce, templateSrv) { + var self = this; + + function MetricSegment(options) { + if (options === '*' || options.value === '*') { + this.value = '*'; + this.html = $sce.trustAsHtml(''); + this.expandable = true; + return; + } + + if (_.isString(options)) { + this.value = options; + this.html = $sce.trustAsHtml(this.value); + return; + } + + this.cssClass = options.cssClass; + this.custom = options.custom; + this.type = options.type; + this.fake = options.fake; + this.value = options.value; + this.type = options.type; + this.expandable = options.expandable; + this.html = options.html || $sce.trustAsHtml(templateSrv.highlightVariablesAsHtml(this.value)); + } + + this.getSegmentForValue = function(value, fallbackText) { + if (value) { + return this.newSegment(value); + } else { + return this.newSegment({value: fallbackText, 
fake: true}); + } + }; + + this.newSelectMeasurement = function() { + return new MetricSegment({value: 'select measurement', fake: true}); + }; + + this.newFake = function(text, type, cssClass) { + return new MetricSegment({value: text, fake: true, type: type, cssClass: cssClass}); + }; + + this.newSegment = function(options) { + return new MetricSegment(options); + }; + + this.newKey = function(key) { + return new MetricSegment({value: key, type: 'key', cssClass: 'query-segment-key' }); + }; + + this.newKeyValue = function(value) { + return new MetricSegment({value: value, type: 'value', cssClass: 'query-segment-value' }); + }; + + this.newCondition = function(condition) { + return new MetricSegment({value: condition, type: 'condition', cssClass: 'query-keyword' }); + }; + + this.newOperator = function(op) { + return new MetricSegment({value: op, type: 'operator', cssClass: 'query-segment-operator' }); + }; + + this.newOperators = function(ops) { + return _.map(ops, function(op) { + return new MetricSegment({value: op, type: 'operator', cssClass: 'query-segment-operator' }); + }); + }; + + this.transformToSegments = function(addTemplateVars, variableTypeFilter) { + return function(results) { + var segments = _.map(results, function(segment) { + return self.newSegment({ value: segment.text, expandable: segment.expandable }); + }); + + if (addTemplateVars) { + _.each(templateSrv.variables, function(variable) { + if (variableTypeFilter === void 0 || variableTypeFilter === variable.type) { + segments.unshift(self.newSegment({ type: 'template', value: '$' + variable.name, expandable: true })); + } + }); + } + + return segments; + }; + }; + + this.newSelectMetric = function() { + return new MetricSegment({value: 'select metric', fake: true}); + }; + + this.newPlusButton = function() { + return new MetricSegment({fake: true, html: '', type: 'plus-button' }); + }; + + this.newSelectTagValue = function() { + return new MetricSegment({value: 'select tag value', fake: true}); + }; + + }); + +}); diff --git a/public/app/services/timer.js b/public/app/core/services/timer.js similarity index 80% rename from public/app/services/timer.js rename to public/app/core/services/timer.js index 3939fe6b78f02..668388a786e12 100644 --- a/public/app/services/timer.js +++ b/public/app/core/services/timer.js @@ -1,13 +1,12 @@ define([ 'angular', - 'lodash' + 'lodash', + '../core_module', ], -function (angular, _) { +function (angular, _, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('timer', function($timeout) { + coreModule.service('timer', function($timeout) { // This service really just tracks a list of $timeout promises to give us a // method for cancelling them all when we need to @@ -31,4 +30,4 @@ function (angular, _) { }; }); -}); \ No newline at end of file +}); diff --git a/public/app/services/utilSrv.js b/public/app/core/services/util_srv.js similarity index 63% rename from public/app/services/utilSrv.js rename to public/app/core/services/util_srv.js index 7fca0c0334e2a..2418546e5dacf 100644 --- a/public/app/services/utilSrv.js +++ b/public/app/core/services/util_srv.js @@ -1,19 +1,19 @@ define([ 'angular', + '../core_module', ], -function (angular) { +function (angular, coreModule) { 'use strict'; - var module = angular.module('grafana.services'); - - module.service('utilSrv', function($rootScope, $modal, $q) { + coreModule.service('utilSrv', function($rootScope, $modal, $q) { this.init = function() { - $rootScope.onAppEvent('show-modal', 
this.showModal); + $rootScope.onAppEvent('show-modal', this.showModal, $rootScope); }; this.showModal = function(e, options) { var modal = $modal({ + modalClass: options.modalClass, template: options.src, persist: false, show: false, diff --git a/public/app/core/settings.js b/public/app/core/settings.js new file mode 100644 index 0000000000000..59eaf8ea8b9f5 --- /dev/null +++ b/public/app/core/settings.js @@ -0,0 +1,27 @@ +define([ + 'lodash', +], +function (_) { + "use strict"; + + return function Settings (options) { + var defaults = { + datasources : {}, + window_title_prefix : 'Grafana - ', + panels : { + 'graph': { path: 'app/panels/graph', name: 'Graph' }, + 'table': { path: 'app/panels/table', name: 'Table' }, + 'singlestat': { path: 'app/panels/singlestat', name: 'Single stat' }, + 'text': { path: 'app/panels/text', name: 'Text' }, + 'dashlist': { path: 'app/panels/dashlist', name: 'Dashboard list' }, + }, + new_panel_title: 'Panel Title', + plugins: {}, + playlist_timespan: "1m", + unsaved_changes_warning: true, + appSubUrl: "" + }; + + return _.extend({}, defaults, options); + }; +}); diff --git a/public/app/components/store.js b/public/app/core/store.js similarity index 88% rename from public/app/components/store.js rename to public/app/core/store.js index 84e72b963143c..504b0e5aff5d2 100644 --- a/public/app/components/store.js +++ b/public/app/core/store.js @@ -12,7 +12,7 @@ define([], function() { if (def !== void 0 && !this.exists(key)) { return def; } - return window.localStorage[key] === 'true' ? true : false; + return window.localStorage[key] === 'true'; }, exists: function(key) { return window.localStorage[key] !== void 0; diff --git a/public/app/components/timeSeries.js b/public/app/core/time_series.ts similarity index 70% rename from public/app/components/timeSeries.js rename to public/app/core/time_series.ts index 74194e68ff97e..429e3882e8b33 100644 --- a/public/app/components/timeSeries.js +++ b/public/app/core/time_series.ts @@ -1,11 +1,48 @@ -define([ - 'lodash', - 'kbn' -], -function (_, kbn) { - 'use strict'; - - function TimeSeries(opts) { +/// + +import _ = require('lodash'); +import kbn = require('app/core/utils/kbn'); + +function matchSeriesOverride(aliasOrRegex, seriesAlias) { + if (!aliasOrRegex) { return false; } + + if (aliasOrRegex[0] === '/') { + var regex = kbn.stringToJsRegex(aliasOrRegex); + return seriesAlias.match(regex) != null; + } + + return aliasOrRegex === seriesAlias; +} + +function translateFillOption(fill) { + return fill === 0 ? 0.001 : fill/10; +} + +class TimeSeries { + datapoints: any; + id: string; + label: string; + alias: string; + color: string; + valueFormater: any; + stats: any; + legend: boolean; + allIsNull: boolean; + allIsZero: boolean; + decimals: number; + scaledDecimals: number; + + lines: any; + bars: any; + points: any; + yaxis: any; + zindex: any; + stack: any; + nullPointMode: any; + fillBelowTo: any; + transform: any; + + constructor(opts) { this.datapoints = opts.datapoints; this.label = opts.alias; this.id = opts.alias; @@ -16,27 +53,13 @@ function (_, kbn) { this.legend = true; } - function matchSeriesOverride(aliasOrRegex, seriesAlias) { - if (!aliasOrRegex) { return false; } - - if (aliasOrRegex[0] === '/') { - var regex = kbn.stringToJsRegex(aliasOrRegex); - return seriesAlias.match(regex) != null; - } - - return aliasOrRegex === seriesAlias; - } - - function translateFillOption(fill) { - return fill === 0 ? 
0.001 : fill/10; - } - - TimeSeries.prototype.applySeriesOverrides = function(overrides) { + applySeriesOverrides(overrides) { this.lines = {}; this.points = {}; this.bars = {}; this.yaxis = 1; this.zindex = 0; + this.nullPointMode = null; delete this.stack; for (var i = 0; i < overrides.length; i++) { @@ -50,6 +73,7 @@ function (_, kbn) { if (override.fill !== void 0) { this.lines.fill = translateFillOption(override.fill); } if (override.stack !== void 0) { this.stack = override.stack; } if (override.linewidth !== void 0) { this.lines.lineWidth = override.linewidth; } + if (override.nullPointMode !== void 0) { this.nullPointMode = override.nullPointMode; } if (override.pointradius !== void 0) { this.points.radius = override.pointradius; } if (override.steppedLine !== void 0) { this.lines.steps = override.steppedLine; } if (override.zindex !== void 0) { this.zindex = override.zindex; } @@ -64,7 +88,7 @@ function (_, kbn) { } }; - TimeSeries.prototype.getFlotPairs = function (fillStyle) { + getFlotPairs(fillStyle) { var result = []; this.stats.total = 0; @@ -73,11 +97,13 @@ function (_, kbn) { this.stats.avg = null; this.stats.current = null; this.allIsNull = true; + this.allIsZero = true; var ignoreNulls = fillStyle === 'connected'; var nullAsZero = fillStyle === 'null as zero'; var currentTime; var currentValue; + var nonNulls = 0; for (var i = 0; i < this.datapoints.length; i++) { currentValue = this.datapoints[i][0]; @@ -94,6 +120,7 @@ function (_, kbn) { if (_.isNumber(currentValue)) { this.stats.total += currentValue; this.allIsNull = false; + nonNulls++; } if (currentValue > this.stats.max) { @@ -105,6 +132,10 @@ function (_, kbn) { } } + if (currentValue != 0) { + this.allIsZero = false; + } + result.push([currentTime, currentValue]); } @@ -116,26 +147,26 @@ function (_, kbn) { if (this.stats.min === Number.MAX_VALUE) { this.stats.min = null; } if (result.length) { - this.stats.avg = (this.stats.total / result.length); + this.stats.avg = (this.stats.total / nonNulls); this.stats.current = result[result.length-1][1]; if (this.stats.current === null && result.length > 1) { this.stats.current = result[result.length-2][1]; } } + this.stats.count = result.length; return result; - }; + } - TimeSeries.prototype.updateLegendValues = function(formater, decimals, scaledDecimals) { + updateLegendValues(formater, decimals, scaledDecimals) { this.valueFormater = formater; this.decimals = decimals; this.scaledDecimals = scaledDecimals; - }; + } - TimeSeries.prototype.formatValue = function(value) { + formatValue(value) { return this.valueFormater(value, this.decimals, this.scaledDecimals); - }; - - return TimeSeries; + } +} -}); +export = TimeSeries; diff --git a/public/app/core/utils/datemath.ts b/public/app/core/utils/datemath.ts new file mode 100644 index 0000000000000..a19beb116d881 --- /dev/null +++ b/public/app/core/utils/datemath.ts @@ -0,0 +1,125 @@ +/// + +import _ = require('lodash'); +import moment = require('moment'); + +var units = ['y', 'M', 'w', 'd', 'h', 'm', 's']; +var unitsAsc = _.sortBy(units, function (unit) { + return moment.duration(1, unit).valueOf(); +}); + +var unitsDesc = unitsAsc.reverse(); + +function parse(text, roundUp?) 
{ + if (!text) { return undefined; } + if (moment.isMoment(text)) { return text; } + if (_.isDate(text)) { return moment(text); } + + var time; + var mathString = ''; + var index; + var parseString; + + if (text.substring(0, 3) === 'now') { + time = moment(); + mathString = text.substring('now'.length); + } else { + index = text.indexOf('||'); + if (index === -1) { + parseString = text; + mathString = ''; // nothing else + } else { + parseString = text.substring(0, index); + mathString = text.substring(index + 2); + } + // We're going to just require ISO8601 timestamps, k? + time = moment(parseString); + } + + if (!mathString.length) { + return time; + } + + return parseDateMath(mathString, time, roundUp); +} + +function isValid(text) { + var date = parse(text); + if (!date) { + return false; + } + + if (moment.isMoment(date)) { + return date.isValid(); + } + + return false; +} + +function parseDateMath(mathString, time, roundUp?) { + var dateTime = time; + var i = 0; + var len = mathString.length; + + while (i < len) { + var c = mathString.charAt(i++); + var type; + var num; + var unit; + + if (c === '/') { + type = 0; + } else if (c === '+') { + type = 1; + } else if (c === '-') { + type = 2; + } else { + return undefined; + } + + if (isNaN(mathString.charAt(i))) { + num = 1; + } else if (mathString.length === 2) { + num = mathString.charAt(i); + } else { + var numFrom = i; + while (!isNaN(mathString.charAt(i))) { + i++; + if (i > 10) { return undefined; } + } + num = parseInt(mathString.substring(numFrom, i), 10); + } + + if (type === 0) { + // rounding is only allowed on whole, single, units (eg M or 1M, not 0.5M or 2M) + if (num !== 1) { + return undefined; + } + } + unit = mathString.charAt(i++); + + if (!_.contains(units, unit)) { + return undefined; + } else { + if (type === 0) { + if (roundUp) { + dateTime.endOf(unit); + } + else { + dateTime.startOf(unit); + } + } else if (type === 1) { + dateTime.add(num, unit); + } else if (type === 2) { + dateTime.subtract(num, unit); + } + } + } + return dateTime; +} + +export = { + parse: parse, + parseDateMath: parseDateMath, + isValid: isValid +}; diff --git a/public/app/core/utils/flatten.ts b/public/app/core/utils/flatten.ts new file mode 100644 index 0000000000000..fb4c47d1e3da9 --- /dev/null +++ b/public/app/core/utils/flatten.ts @@ -0,0 +1,39 @@ +// Copyright (c) 2014, Hugh Kennedy +// Based on code from https://github.com/hughsk/flat/blob/master/index.js +// +function flatten(target, opts): any { + opts = opts || {}; + + var delimiter = opts.delimiter || '.'; + var maxDepth = opts.maxDepth || 3; + var currentDepth = 1; + var output = {}; + + function step(object, prev) { + Object.keys(object).forEach(function(key) { + var value = object[key]; + var isarray = opts.safe && Array.isArray(value); + var type = Object.prototype.toString.call(value); + var isobject = type === "[object Object]"; + + var newKey = prev ? 
prev + delimiter + key : key; + + if (!opts.maxDepth) { + maxDepth = currentDepth + 1; + } + + if (!isarray && isobject && Object.keys(value).length && currentDepth < maxDepth) { + ++currentDepth; + return step(value, newKey); + } + + output[newKey] = value; + }); + } + + step(target, null); + + return output; +} + +export = flatten; diff --git a/public/app/core/utils/kbn.js b/public/app/core/utils/kbn.js new file mode 100644 index 0000000000000..123e8e16be117 --- /dev/null +++ b/public/app/core/utils/kbn.js @@ -0,0 +1,667 @@ +define([ + 'jquery', + 'lodash', +], +function($, _) { + 'use strict'; + + var kbn = {}; + kbn.valueFormats = {}; + + ///// HELPER FUNCTIONS ///// + + kbn.round_interval = function(interval) { + switch (true) { + // 0.5s + case (interval <= 500): + return 100; // 0.1s + // 5s + case (interval <= 5000): + return 1000; // 1s + // 7.5s + case (interval <= 7500): + return 5000; // 5s + // 15s + case (interval <= 15000): + return 10000; // 10s + // 45s + case (interval <= 45000): + return 30000; // 30s + // 3m + case (interval <= 180000): + return 60000; // 1m + // 9m + case (interval <= 450000): + return 300000; // 5m + // 20m + case (interval <= 1200000): + return 600000; // 10m + // 45m + case (interval <= 2700000): + return 1800000; // 30m + // 2h + case (interval <= 7200000): + return 3600000; // 1h + // 6h + case (interval <= 21600000): + return 10800000; // 3h + // 24h + case (interval <= 86400000): + return 43200000; // 12h + // 48h + case (interval <= 172800000): + return 86400000; // 24h + // 1w + case (interval <= 604800000): + return 86400000; // 24h + // 3w + case (interval <= 1814400000): + return 604800000; // 1w + // 2y + case (interval < 3628800000): + return 2592000000; // 30d + default: + return 31536000000; // 1y + } + }; + + kbn.secondsToHms = function(seconds) { + var numyears = Math.floor(seconds / 31536000); + if(numyears){ + return numyears + 'y'; + } + var numdays = Math.floor((seconds % 31536000) / 86400); + if(numdays){ + return numdays + 'd'; + } + var numhours = Math.floor(((seconds % 31536000) % 86400) / 3600); + if(numhours){ + return numhours + 'h'; + } + var numminutes = Math.floor((((seconds % 31536000) % 86400) % 3600) / 60); + if(numminutes){ + return numminutes + 'm'; + } + var numseconds = Math.floor((((seconds % 31536000) % 86400) % 3600) % 60); + if(numseconds){ + return numseconds + 's'; + } + var nummilliseconds = Math.floor(seconds * 1000.0); + if(nummilliseconds){ + return nummilliseconds + 'ms'; + } + + return 'less then a millisecond'; //'just now' //or other string you like; + }; + + kbn.to_percent = function(number,outof) { + return Math.floor((number/outof)*10000)/100 + "%"; + }; + + kbn.addslashes = function(str) { + str = str.replace(/\\/g, '\\\\'); + str = str.replace(/\'/g, '\\\''); + str = str.replace(/\"/g, '\\"'); + str = str.replace(/\0/g, '\\0'); + return str; + }; + + kbn.interval_regex = /(\d+(?:\.\d+)?)([Mwdhmsy])/; + + // histogram & trends + kbn.intervals_in_seconds = { + y: 31536000, + M: 2592000, + w: 604800, + d: 86400, + h: 3600, + m: 60, + s: 1 + }; + + kbn.calculateInterval = function(range, resolution, userInterval) { + var lowLimitMs = 1; // 1 millisecond default low limit + var intervalMs, lowLimitInterval; + + if (userInterval) { + if (userInterval[0] === '>') { + lowLimitInterval = userInterval.slice(1); + lowLimitMs = kbn.interval_to_ms(lowLimitInterval); + } + else { + return userInterval; + } + } + + intervalMs = kbn.round_interval((range.to.valueOf() - range.from.valueOf()) / resolution); + if 
(lowLimitMs > intervalMs) { + intervalMs = lowLimitMs; + } + + return kbn.secondsToHms(intervalMs / 1000); + }; + + kbn.describe_interval = function (string) { + var matches = string.match(kbn.interval_regex); + if (!matches || !_.has(kbn.intervals_in_seconds, matches[2])) { + throw new Error('Invalid interval string, expexcting a number followed by one of "Mwdhmsy"'); + } else { + return { + sec: kbn.intervals_in_seconds[matches[2]], + type: matches[2], + count: parseInt(matches[1], 10) + }; + } + }; + + kbn.interval_to_ms = function(string) { + var info = kbn.describe_interval(string); + return info.sec * 1000 * info.count; + }; + + kbn.interval_to_seconds = function (string) { + var info = kbn.describe_interval(string); + return info.sec * info.count; + }; + + kbn.query_color_dot = function (color, diameter) { + return '
    '; + }; + + kbn.slugifyForUrl = function(str) { + return str + .toLowerCase() + .replace(/[^\w ]+/g,'') + .replace(/ +/g,'-'); + }; + + kbn.exportSeriesListToCsv = function(seriesList) { + var text = 'Series;Time;Value\n'; + _.each(seriesList, function(series) { + _.each(series.datapoints, function(dp) { + text += series.alias + ';' + new Date(dp[1]).toISOString() + ';' + dp[0] + '\n'; + }); + }); + var blob = new Blob([text], { type: "text/csv;charset=utf-8" }); + window.saveAs(blob, 'grafana_data_export.csv'); + }; + + kbn.stringToJsRegex = function(str) { + if (str[0] !== '/') { + return new RegExp('^' + str + '$'); + } + + var match = str.match(new RegExp('^/(.*?)/(g?i?m?y?)$')); + return new RegExp(match[1], match[2]); + }; + + kbn.toFixed = function(value, decimals) { + if (value === null) { + return ""; + } + + var factor = decimals ? Math.pow(10, Math.max(0, decimals)) : 1; + var formatted = String(Math.round(value * factor) / factor); + + // if exponent return directly + if (formatted.indexOf('e') !== -1 || value === 0) { + return formatted; + } + + // If tickDecimals was specified, ensure that we have exactly that + // much precision; otherwise default to the value's own precision. + if (decimals != null) { + var decimalPos = formatted.indexOf("."); + var precision = decimalPos === -1 ? 0 : formatted.length - decimalPos - 1; + if (precision < decimals) { + return (precision ? formatted : formatted + ".") + (String(factor)).substr(1, decimals - precision); + } + } + + return formatted; + }; + + kbn.toFixedScaled = function(value, decimals, scaledDecimals, additionalDecimals, ext) { + if (scaledDecimals === null) { + return kbn.toFixed(value, decimals) + ext; + } else { + return kbn.toFixed(value, scaledDecimals + additionalDecimals) + ext; + } + }; + + kbn.roundValue = function (num, decimals) { + if (num === null) { return null; } + var n = Math.pow(10, decimals); + return Math.round((n * num).toFixed(decimals)) / n; + }; + + ///// FORMAT FUNCTION CONSTRUCTORS ///// + + kbn.formatBuilders = {}; + + // Formatter which always appends a fixed unit string to the value. No + // scaling of the value is performed. + kbn.formatBuilders.fixedUnit = function(unit) { + return function(size, decimals) { + if (size === null) { return ""; } + return kbn.toFixed(size, decimals) + ' ' + unit; + }; + }; + + // Formatter which scales the unit string geometrically according to the given + // numeric factor. Repeatedly scales the value down by the factor until it is + // less than the factor in magnitude, or the end of the array is reached. + kbn.formatBuilders.scaledUnits = function(factor, extArray) { + return function(size, decimals, scaledDecimals) { + if (size === null) { + return ""; + } + + var steps = 0; + var limit = extArray.length; + + while (Math.abs(size) >= factor) { + steps++; + size /= factor; + + if (steps >= limit) { return "NA"; } + } + + if (steps > 0 && scaledDecimals !== null) { + decimals = scaledDecimals + (3 * steps); + } + + return kbn.toFixed(size, decimals) + extArray[steps]; + }; + }; + + // Extension of the scaledUnits builder which uses SI decimal prefixes. If an + // offset is given, it adjusts the starting units at the given prefix; a value + // of 0 starts at no scale; -3 drops to nano, +2 starts at mega, etc. 
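// Worked example of the offset semantics described above (illustrative sketch,
// assuming the valueFormats wiring further down in this file; decimals = 1 and
// an explicit null scaledDecimals keep the requested precision):
//
//   kbn.valueFormats.watt(1500, 1, null);     // "1.5 kW"  (offset 0, input in W)
//   kbn.valueFormats.kwatt(1500, 1, null);    // "1.5 MW"  (offset +1, input already in kW)
//   kbn.valueFormats.lengthmm(1500, 1, null); // "1.5 m"   (offset -1, input in mm)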
+ kbn.formatBuilders.decimalSIPrefix = function(unit, offset) { + var prefixes = ['n', 'µ', 'm', '', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']; + prefixes = prefixes.slice(3 + (offset || 0)); + var units = prefixes.map(function(p) { return ' ' + p + unit; }); + return kbn.formatBuilders.scaledUnits(1000, units); + }; + + // Extension of the scaledUnits builder which uses SI binary prefixes. If + // offset is given, it starts the units at the given prefix; otherwise, the + // offset defaults to zero and the initial unit is not prefixed. + kbn.formatBuilders.binarySIPrefix = function(unit, offset) { + var prefixes = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'].slice(offset); + var units = prefixes.map(function(p) { return ' ' + p + unit; }); + return kbn.formatBuilders.scaledUnits(1024, units); + }; + + // Currency formatter for prefixing a symbol onto a number. Supports scaling + // up to the trillions. + kbn.formatBuilders.currency = function(symbol) { + var units = ['', 'K', 'M', 'B', 'T']; + var scaler = kbn.formatBuilders.scaledUnits(1000, units); + return function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + var scaled = scaler(size, decimals, scaledDecimals); + return symbol + scaled; + }; + }; + + kbn.formatBuilders.simpleCountUnit = function(symbol) { + var units = ['', 'K', 'M', 'B', 'T']; + var scaler = kbn.formatBuilders.scaledUnits(1000, units); + return function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + var scaled = scaler(size, decimals, scaledDecimals); + return scaled + " " + symbol; + }; + }; + + ///// VALUE FORMATS ///// + + // Dimensionless Units + kbn.valueFormats.none = kbn.toFixed; + kbn.valueFormats.short = kbn.formatBuilders.scaledUnits(1000, ['', ' K', ' Mil', ' Bil', ' Tri', ' Quadr', ' Quint', ' Sext', ' Sept']); + kbn.valueFormats.dB = kbn.formatBuilders.fixedUnit('dB'); + kbn.valueFormats.ppm = kbn.formatBuilders.fixedUnit('ppm'); + + kbn.valueFormats.percent = function(size, decimals) { + if (size === null) { return ""; } + return kbn.toFixed(size, decimals) + '%'; + }; + + kbn.valueFormats.percentunit = function(size, decimals) { + if (size === null) { return ""; } + return kbn.toFixed(100*size, decimals) + '%'; + }; + + // Currencies + kbn.valueFormats.currencyUSD = kbn.formatBuilders.currency('$'); + kbn.valueFormats.currencyGBP = kbn.formatBuilders.currency('£'); + kbn.valueFormats.currencyEUR = kbn.formatBuilders.currency('€'); + kbn.valueFormats.currencyJPY = kbn.formatBuilders.currency('¥'); + + // Data + kbn.valueFormats.bits = kbn.formatBuilders.binarySIPrefix('b'); + kbn.valueFormats.bytes = kbn.formatBuilders.binarySIPrefix('B'); + kbn.valueFormats.kbytes = kbn.formatBuilders.binarySIPrefix('B', 1); + kbn.valueFormats.mbytes = kbn.formatBuilders.binarySIPrefix('B', 2); + kbn.valueFormats.gbytes = kbn.formatBuilders.binarySIPrefix('B', 3); + + // Data Rate + kbn.valueFormats.pps = kbn.formatBuilders.decimalSIPrefix('pps'); + kbn.valueFormats.bps = kbn.formatBuilders.decimalSIPrefix('bps'); + kbn.valueFormats.Bps = kbn.formatBuilders.decimalSIPrefix('Bps'); + + // Throughput + kbn.valueFormats.ops = kbn.formatBuilders.simpleCountUnit('ops'); + kbn.valueFormats.rps = kbn.formatBuilders.simpleCountUnit('rps'); + kbn.valueFormats.wps = kbn.formatBuilders.simpleCountUnit('wps'); + kbn.valueFormats.iops = kbn.formatBuilders.simpleCountUnit('iops'); + + // Energy + kbn.valueFormats.watt = kbn.formatBuilders.decimalSIPrefix('W'); + kbn.valueFormats.kwatt = kbn.formatBuilders.decimalSIPrefix('W', 
1); + kbn.valueFormats.watth = kbn.formatBuilders.decimalSIPrefix('Wh'); + kbn.valueFormats.kwatth = kbn.formatBuilders.decimalSIPrefix('Wh', 1); + kbn.valueFormats.joule = kbn.formatBuilders.decimalSIPrefix('J'); + kbn.valueFormats.ev = kbn.formatBuilders.decimalSIPrefix('eV'); + kbn.valueFormats.amp = kbn.formatBuilders.decimalSIPrefix('A'); + kbn.valueFormats.volt = kbn.formatBuilders.decimalSIPrefix('V'); + + // Temperature + kbn.valueFormats.celsius = kbn.formatBuilders.fixedUnit('°C'); + kbn.valueFormats.farenheit = kbn.formatBuilders.fixedUnit('°F'); + kbn.valueFormats.kelvin = kbn.formatBuilders.fixedUnit('K'); + kbn.valueFormats.humidity = kbn.formatBuilders.fixedUnit('%H'); + + // Pressure + kbn.valueFormats.pressurembar = kbn.formatBuilders.fixedUnit('mbar'); + kbn.valueFormats.pressurehpa = kbn.formatBuilders.fixedUnit('hPa'); + kbn.valueFormats.pressurehg = kbn.formatBuilders.fixedUnit('"Hg'); + kbn.valueFormats.pressurepsi = kbn.formatBuilders.scaledUnits(1000, [' psi', ' ksi', ' Mpsi']); + + // Length + kbn.valueFormats.lengthm = kbn.formatBuilders.decimalSIPrefix('m'); + kbn.valueFormats.lengthmm = kbn.formatBuilders.decimalSIPrefix('m', -1); + kbn.valueFormats.lengthkm = kbn.formatBuilders.decimalSIPrefix('m', 1); + kbn.valueFormats.lengthmi = kbn.formatBuilders.fixedUnit('mi'); + + // Velocity + kbn.valueFormats.velocityms = kbn.formatBuilders.fixedUnit('m/s'); + kbn.valueFormats.velocitykmh = kbn.formatBuilders.fixedUnit('km/h'); + kbn.valueFormats.velocitymph = kbn.formatBuilders.fixedUnit('mph'); + kbn.valueFormats.velocityknot = kbn.formatBuilders.fixedUnit('kn'); + + // Volume + kbn.valueFormats.litre = kbn.formatBuilders.decimalSIPrefix('L'); + kbn.valueFormats.mlitre = kbn.formatBuilders.decimalSIPrefix('L', -1); + + // Time + kbn.valueFormats.hertz = kbn.formatBuilders.decimalSIPrefix('Hz'); + + kbn.valueFormats.ms = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 1000) { + return kbn.toFixed(size, decimals) + " ms"; + } + // Less than 1 min + else if (Math.abs(size) < 60000) { + return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " s"); + } + // Less than 1 hour, devide in minutes + else if (Math.abs(size) < 3600000) { + return kbn.toFixedScaled(size / 60000, decimals, scaledDecimals, 5, " min"); + } + // Less than one day, devide in hours + else if (Math.abs(size) < 86400000) { + return kbn.toFixedScaled(size / 3600000, decimals, scaledDecimals, 7, " hour"); + } + // Less than one year, devide in days + else if (Math.abs(size) < 31536000000) { + return kbn.toFixedScaled(size / 86400000, decimals, scaledDecimals, 8, " day"); + } + + return kbn.toFixedScaled(size / 31536000000, decimals, scaledDecimals, 10, " year"); + }; + + kbn.valueFormats.s = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 60) { + return kbn.toFixed(size, decimals) + " s"; + } + // Less than 1 hour, devide in minutes + else if (Math.abs(size) < 3600) { + return kbn.toFixedScaled(size / 60, decimals, scaledDecimals, 1, " min"); + } + // Less than one day, devide in hours + else if (Math.abs(size) < 86400) { + return kbn.toFixedScaled(size / 3600, decimals, scaledDecimals, 4, " hour"); + } + // Less than one week, devide in days + else if (Math.abs(size) < 604800) { + return kbn.toFixedScaled(size / 86400, decimals, scaledDecimals, 5, " day"); + } + // Less than one year, devide in week + else if (Math.abs(size) < 31536000) { + return kbn.toFixedScaled(size / 604800, 
decimals, scaledDecimals, 6, " week"); + } + + return kbn.toFixedScaled(size / 3.15569e7, decimals, scaledDecimals, 7, " year"); + }; + + kbn.valueFormats['µs'] = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 1000) { + return kbn.toFixed(size, decimals) + " µs"; + } + else if (Math.abs(size) < 1000000) { + return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " ms"); + } + else { + return kbn.toFixedScaled(size / 1000000, decimals, scaledDecimals, 6, " s"); + } + }; + + kbn.valueFormats.ns = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 1000) { + return kbn.toFixed(size, decimals) + " ns"; + } + else if (Math.abs(size) < 1000000) { + return kbn.toFixedScaled(size / 1000, decimals, scaledDecimals, 3, " µs"); + } + else if (Math.abs(size) < 1000000000) { + return kbn.toFixedScaled(size / 1000000, decimals, scaledDecimals, 6, " ms"); + } + else if (Math.abs(size) < 60000000000){ + return kbn.toFixedScaled(size / 1000000000, decimals, scaledDecimals, 9, " s"); + } + else { + return kbn.toFixedScaled(size / 60000000000, decimals, scaledDecimals, 12, " min"); + } + }; + + kbn.valueFormats.m = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 60) { + return kbn.toFixed(size, decimals) + " min"; + } + else if (Math.abs(size) < 1440) { + return kbn.toFixedScaled(size / 60, decimals, scaledDecimals, 2, " hour"); + } + else if (Math.abs(size) < 10080) { + return kbn.toFixedScaled(size / 1440, decimals, scaledDecimals, 3, " day"); + } + else if (Math.abs(size) < 604800) { + return kbn.toFixedScaled(size / 10080, decimals, scaledDecimals, 4, " week"); + } + else { + return kbn.toFixedScaled(size / 5.25948e5, decimals, scaledDecimals, 5, " year"); + } + }; + + kbn.valueFormats.h = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 24) { + return kbn.toFixed(size, decimals) + " hour"; + } + else if (Math.abs(size) < 168) { + return kbn.toFixedScaled(size / 24, decimals, scaledDecimals, 2, " day"); + } + else if (Math.abs(size) < 8760) { + return kbn.toFixedScaled(size / 168, decimals, scaledDecimals, 3, " week"); + } + else { + return kbn.toFixedScaled(size / 8760, decimals, scaledDecimals, 4, " year"); + } + }; + + kbn.valueFormats.d = function(size, decimals, scaledDecimals) { + if (size === null) { return ""; } + + if (Math.abs(size) < 7) { + return kbn.toFixed(size, decimals) + " day"; + } + else if (Math.abs(size) < 365) { + return kbn.toFixedScaled(size / 7, decimals, scaledDecimals, 2, " week"); + } + else { + return kbn.toFixedScaled(size / 365, decimals, scaledDecimals, 3, " year"); + } + }; + + ///// FORMAT MENU ///// + + kbn.getUnitFormats = function() { + return [ + { + text: 'none', + submenu: [ + {text: 'none' , value: 'none' }, + {text: 'short', value: 'short' }, + {text: 'percent (0-100)', value: 'percent' }, + {text: 'percent (0.0-1.0)', value: 'percentunit'}, + {text: 'Humidity (%H)', value: 'humidity' }, + {text: 'ppm', value: 'ppm' }, + {text: 'decibel', value: 'dB' }, + ] + }, + { + text: 'currency', + submenu: [ + {text: 'Dollars ($)', value: 'currencyUSD'}, + {text: 'Pounds (£)', value: 'currencyGBP'}, + {text: 'Euro (€)', value: 'currencyEUR'}, + {text: 'Yen (¥)', value: 'currencyJPY'}, + ] + }, + { + text: 'time', + submenu: [ + {text: 'Hertz (1/s)', value: 'hertz'}, + {text: 'nanoseconds (ns)' , value: 'ns' }, + {text: 'microseconds (µs)', value: 
'µs' }, + {text: 'milliseconds (ms)', value: 'ms' }, + {text: 'seconds (s)', value: 's' }, + {text: 'minutes (m)', value: 'm' }, + {text: 'hours (h)', value: 'h' }, + {text: 'days (d)', value: 'd' }, + ] + }, + { + text: 'data', + submenu: [ + {text: 'bits', value: 'bits' }, + {text: 'bytes', value: 'bytes' }, + {text: 'kilobytes', value: 'kbytes'}, + {text: 'megabytes', value: 'mbytes'}, + {text: 'gigabytes', value: 'gbytes'}, + ] + }, + { + text: 'data rate', + submenu: [ + {text: 'packets/sec', value: 'pps'}, + {text: 'bits/sec', value: 'bps'}, + {text: 'bytes/sec', value: 'Bps'}, + ] + }, + { + text: 'throughput', + submenu: [ + {text: 'ops/sec (ops)', value: 'ops' }, + {text: 'reads/sec (rps)', value: 'rps' }, + {text: 'writes/sec (wps)', value: 'wps' }, + {text: 'I/O ops/sec (iops)', value: 'iops'}, + ] + }, + { + text: 'length', + submenu: [ + {text: 'millimetre (mm)', value: 'lengthmm'}, + {text: 'meter (m)', value: 'lengthm' }, + {text: 'kilometer (km)', value: 'lengthkm'}, + {text: 'mile (mi)', value: 'lengthmi'}, + ] + }, + { + text: 'velocity', + submenu: [ + {text: 'm/s', value: 'velocityms' }, + {text: 'km/h', value: 'velocitykmh' }, + {text: 'mph', value: 'velocitymph' }, + {text: 'knot (kn)', value: 'velocityknot'}, + ] + }, + { + text: 'volume', + submenu: [ + {text: 'millilitre', value: 'mlitre'}, + {text: 'litre', value: 'litre' }, + ] + }, + { + text: 'energy', + submenu: [ + {text: 'watt (W)', value: 'watt' }, + {text: 'kilowatt (kW)', value: 'kwatt' }, + {text: 'watt-hour (Wh)', value: 'watth' }, + {text: 'kilowatt-hour (kWh)', value: 'kwatth'}, + {text: 'joule (J)', value: 'joule' }, + {text: 'electron volt (eV)', value: 'ev' }, + {text: 'Ampere (A)', value: 'amp' }, + {text: 'Volt (V)', value: 'volt' }, + ] + }, + { + text: 'temperature', + submenu: [ + {text: 'Celcius (°C)', value: 'celsius' }, + {text: 'Farenheit (°F)', value: 'farenheit' }, + {text: 'Kelvin (K)', value: 'kelvin' }, + ] + }, + { + text: 'pressure', + submenu: [ + {text: 'Millibars', value: 'pressurembar'}, + {text: 'Hectopascals', value: 'pressurehpa' }, + {text: 'Inches of mercury', value: 'pressurehg' }, + {text: 'PSI', value: 'pressurepsi' }, + ] + } + ]; + }; + + return kbn; +}); diff --git a/public/app/core/utils/rangeutil.ts b/public/app/core/utils/rangeutil.ts new file mode 100644 index 0000000000000..9e44a50c81f6a --- /dev/null +++ b/public/app/core/utils/rangeutil.ts @@ -0,0 +1,150 @@ +/// + +import moment = require('moment'); +import _ = require('lodash'); +import dateMath = require('app/core/utils/datemath'); +import angular = require('angular'); + +var spans = { + 's': {display: 'second'}, + 'm': {display: 'minute'}, + 'h': {display: 'hour'}, + 'd': {display: 'day'}, + 'w': {display: 'week'}, + 'M': {display: 'month'}, + 'y': {display: 'year'}, +}; + +var rangeOptions = [ + { from: 'now/d', to: 'now/d', display: 'Today', section: 2 }, + { from: 'now/d', to: 'now', display: 'The day so far', section: 2 }, + { from: 'now/w', to: 'now/w', display: 'This week', section: 2 }, + { from: 'now/w', to: 'now', display: 'Week to date', section: 2 }, + { from: 'now/M', to: 'now/M', display: 'This month', section: 2 }, + { from: 'now/y', to: 'now/y', display: 'This year', section: 2 }, + + { from: 'now-1d/d', to: 'now-1d/d', display: 'Yesterday', section: 1 }, + { from: 'now-2d/d', to: 'now-2d/d', display: 'Day before yesterday', section: 1 }, + { from: 'now-7d/d', to: 'now-7d/d', display: 'This day last week', section: 1 }, + { from: 'now-1w/w', to: 'now-1w/w', display: 'Previous week', section: 1 
}, + { from: 'now-1M/M', to: 'now-1M/M', display: 'Previous month', section: 1 }, + { from: 'now-1y/y', to: 'now-1y/y', display: 'Previous year', section: 1 }, + + { from: 'now-5m', to: 'now', display: 'Last 5 minutes', section: 3 }, + { from: 'now-15m', to: 'now', display: 'Last 15 minutes', section: 3 }, + { from: 'now-30m', to: 'now', display: 'Last 30 minutes', section: 3 }, + { from: 'now-1h', to: 'now', display: 'Last 1 hour', section: 3 }, + { from: 'now-3h', to: 'now', display: 'Last 3 hours', section: 3 }, + { from: 'now-6h', to: 'now', display: 'Last 6 hours', section: 3 }, + { from: 'now-12h', to: 'now', display: 'Last 12 hours', section: 3 }, + { from: 'now-24h', to: 'now', display: 'Last 24 hours', section: 3 }, + + { from: 'now-7d', to: 'now', display: 'Last 7 days', section: 0 }, + { from: 'now-30d', to: 'now', display: 'Last 30 days', section: 0 }, + { from: 'now-60d', to: 'now', display: 'Last 60 days', section: 0 }, + { from: 'now-90d', to: 'now', display: 'Last 90 days', section: 0 }, + { from: 'now-6M', to: 'now', display: 'Last 6 months', section: 0 }, + { from: 'now-1y', to: 'now', display: 'Last 1 year', section: 0 }, + { from: 'now-2y', to: 'now', display: 'Last 2 years', section: 0 }, + { from: 'now-5y', to: 'now', display: 'Last 5 years', section: 0 }, +]; + +var absoluteFormat = 'MMM D, YYYY HH:mm:ss'; + +var rangeIndex = {}; +_.each(rangeOptions, function (frame) { + rangeIndex[frame.from + ' to ' + frame.to] = frame; +}); + + function getRelativeTimesList(timepickerSettings, currentDisplay) { + var groups = _.groupBy(rangeOptions, (option: any) => { + option.active = option.display === currentDisplay; + return option.section; + }); + + // _.each(timepickerSettings.time_options, (duration: string) => { + // let info = describeTextRange(duration); + // if (info.section) { + // groups[info.section].push(info); + // } + // }); + + return groups; + } + + function formatDate(date) { + return date.format(absoluteFormat); + } + + // handles expressions like + // 5m + // 5m to now/d + // now/d to now + // now/d + // if no to then to now is assumed + function describeTextRange(expr: any) { + if (expr.indexOf('now') === -1) { + expr = 'now-' + expr; + } + + let opt = rangeIndex[expr + ' to now']; + if (opt) { + return opt; + } + + opt = {from: expr, to: 'now'}; + + let parts = /^now-(\d+)(\w)/.exec(expr); + if (parts) { + let unit = parts[2]; + let amount = parseInt(parts[1]); + let span = spans[unit]; + if (span) { + opt.display = 'Last ' + amount + ' ' + span.display; + opt.section = span.section; + if (amount > 1) { + opt.display += 's'; + } + } + } else { + opt.display = opt.from + ' to ' + opt.to; + opt.invalid = true; + } + + return opt; + } + + function describeTimeRange(range) { + var option = rangeIndex[range.from.toString() + ' to ' + range.to.toString()]; + if (option) { + return option.display; + } + + if (moment.isMoment(range.from) && moment.isMoment(range.to)) { + return formatDate(range.from) + ' to ' + formatDate(range.to); + } + + if (moment.isMoment(range.from)) { + var toMoment = dateMath.parse(range.to, true); + return formatDate(range.from) + ' to ' + toMoment.fromNow(); + } + + if (moment.isMoment(range.to)) { + var from = dateMath.parse(range.from, false); + return from.fromNow() + ' to ' + formatDate(range.to); + } + + if (range.to.toString() === 'now') { + var res = describeTextRange(range.from); + return res.display; + } + + return range.from.toString() + ' to ' + range.to.toString(); + } + +export = { + getRelativeTimesList: 
getRelativeTimesList, + describeTextRange: describeTextRange, + describeTimeRange: describeTimeRange, +} + diff --git a/public/app/directives/all.js b/public/app/directives/all.js deleted file mode 100644 index 13a8accffbd33..0000000000000 --- a/public/app/directives/all.js +++ /dev/null @@ -1,22 +0,0 @@ -define([ - './arrayJoin', - './dashUpload', - './grafanaSimplePanel', - './ngBlur', - './dashEditLink', - './ngModelOnBlur', - './misc', - './confirmClick', - './configModal', - './spectrumPicker', - './tags', - './bodyClass', - './valueSelectDropdown', - './metric.segment', - './grafanaVersionCheck', - './dropdown.typeahead', - './topnav', - './giveFocus', - './annotationTooltip', - './passwordStrenght', -], function () {}); diff --git a/public/app/directives/arrayJoin.js b/public/app/directives/arrayJoin.js deleted file mode 100644 index a2b5fbe59dde8..0000000000000 --- a/public/app/directives/arrayJoin.js +++ /dev/null @@ -1,35 +0,0 @@ -define([ - 'angular', - 'app', - 'lodash' -], -function (angular, app, _) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('arrayJoin', function() { - return { - restrict: 'A', - require: 'ngModel', - link: function(scope, element, attr, ngModel) { - - function split_array(text) { - return (text || '').split(','); - } - - function join_array(text) { - if(_.isArray(text)) { - return (text || '').join(','); - } else { - return text; - } - } - - ngModel.$parsers.push(split_array); - ngModel.$formatters.push(join_array); - } - }; - }); - -}); diff --git a/public/app/directives/bodyClass.js b/public/app/directives/bodyClass.js deleted file mode 100644 index 495390a59e809..0000000000000 --- a/public/app/directives/bodyClass.js +++ /dev/null @@ -1,50 +0,0 @@ -define([ - 'angular', - 'lodash', - 'jquery' -], -function (angular, _, $) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('bodyClass', function() { - return { - link: function($scope, elem) { - - var lastHideControlsVal; - - // tooltip removal fix - $scope.$on("$routeChangeSuccess", function() { - $("#tooltip, .tooltip").remove(); - }); - - $scope.$watch('submenuEnabled', function() { - if (!$scope.dashboard) { - return; - } - - elem.toggleClass('submenu-controls-visible', $scope.submenuEnabled); - }); - - $scope.$watch('dashboard.hideControls', function() { - if (!$scope.dashboard) { - return; - } - - var hideControls = $scope.dashboard.hideControls || $scope.playlist_active; - - if (lastHideControlsVal !== hideControls) { - elem.toggleClass('hide-controls', hideControls); - lastHideControlsVal = hideControls; - } - }); - - $scope.$watch('playlistSrv', function(newValue) { - elem.toggleClass('playlist-active', _.isObject(newValue)); - }); - } - }; - }); - -}); diff --git a/public/app/directives/configModal.js b/public/app/directives/configModal.js deleted file mode 100644 index b7e579e70260f..0000000000000 --- a/public/app/directives/configModal.js +++ /dev/null @@ -1,48 +0,0 @@ -define([ - 'angular', - 'lodash', - 'jquery' -], -function (angular, _, $) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('configModal', function($modal, $q, $timeout) { - return { - restrict: 'A', - link: function(scope, elem, attrs) { - var partial = attrs.configModal; - var id = '#' + partial.replace('.html', '').replace(/[\/|\.|:]/g, '-') + '-' + scope.$id; - - elem.bind('click',function() { - if ($(id).length) { - elem.attr('data-target', id).attr('data-toggle', 'modal'); - scope.$apply(function() { scope.$broadcast('modal-opened'); }); - 
return; - } - - var panelModal = $modal({ - template: partial, - persist: true, - show: false, - scope: scope, - keyboard: false - }); - - $q.when(panelModal).then(function(modalEl) { - elem.attr('data-target', id).attr('data-toggle', 'modal'); - - $timeout(function () { - if (!modalEl.data('modal').isShown) { - modalEl.modal('show'); - } - }, 50); - }); - - scope.$apply(); - }); - } - }; - }); -}); diff --git a/public/app/directives/dashEditLink.js b/public/app/directives/dashEditLink.js deleted file mode 100644 index f184af76f2066..0000000000000 --- a/public/app/directives/dashEditLink.js +++ /dev/null @@ -1,107 +0,0 @@ -define([ - 'angular', - 'jquery' -], -function (angular, $) { - 'use strict'; - - var editViewMap = { - 'settings': { src: 'app/features/dashboard/partials/settings.html', title: "Settings" }, - 'annotations': { src: 'app/features/annotations/partials/editor.html', title: "Annotations" }, - 'templating': { src: 'app/features/templating/partials/editor.html', title: "Templating" } - }; - - angular - .module('grafana.directives') - .directive('dashEditorLink', function($timeout) { - return { - restrict: 'A', - link: function(scope, elem, attrs) { - var partial = attrs.dashEditorLink; - - elem.bind('click',function() { - $timeout(function() { - var editorScope = attrs.editorScope === 'isolated' ? null : scope; - scope.appEvent('show-dash-editor', { src: partial, scope: editorScope }); - }); - }); - } - }; - }); - - angular - .module('grafana.directives') - .directive('dashEditorView', function($compile, $location) { - return { - restrict: 'A', - link: function(scope, elem) { - var editorScope; - var lastEditor; - - function hideEditorPane() { - if (editorScope) { editorScope.dismiss(); } - } - - function showEditorPane(evt, payload, editview) { - if (editview) { - scope.contextSrv.editview = editViewMap[editview]; - payload.src = scope.contextSrv.editview.src; - } - - if (lastEditor === payload.src) { - hideEditorPane(); - return; - } - - hideEditorPane(); - - scope.exitFullscreen(); - - lastEditor = payload.src; - editorScope = payload.scope ? payload.scope.$new() : scope.$new(); - - editorScope.dismiss = function() { - editorScope.$destroy(); - elem.empty(); - lastEditor = null; - editorScope = null; - - if (editview) { - var urlParams = $location.search(); - if (editview === urlParams.editview) { - delete urlParams.editview; - $location.search(urlParams); - } - } - }; - - var src = "'" + payload.src + "'"; - var view = $('
    '); - - if (payload.cssClass) { - view.addClass(payload.cssClass); - } - - elem.append(view); - $compile(elem.contents())(editorScope); - } - - scope.$watch("dashboardViewState.state.editview", function(newValue, oldValue) { - if (newValue) { - showEditorPane(null, {}, newValue); - } else if (oldValue) { - scope.contextSrv.editview = null; - if (lastEditor === editViewMap[oldValue]) { - hideEditorPane(); - } - } - }); - - scope.contextSrv.editview = null; - scope.$on("$destroy", hideEditorPane); - scope.onAppEvent('hide-dash-editor', hideEditorPane); - scope.onAppEvent('show-dash-editor', showEditorPane); - } - }; - }); -}); diff --git a/public/app/directives/dropdown.typeahead.js b/public/app/directives/dropdown.typeahead.js deleted file mode 100644 index 0ab2d0045fb31..0000000000000 --- a/public/app/directives/dropdown.typeahead.js +++ /dev/null @@ -1,116 +0,0 @@ -define([ - 'angular', - 'app', - 'lodash', - 'jquery', -], -function (angular, app, _, $) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('dropdownTypeahead', function($compile) { - - var inputTemplate = ''; - - var buttonTemplate = ''; - - return { - scope: { - menuItems: "=dropdownTypeahead", - dropdownTypeaheadOnSelect: "&dropdownTypeaheadOnSelect", - model: '=ngModel' - }, - link: function($scope, elem, attrs) { - var $input = $(inputTemplate); - var $button = $(buttonTemplate); - $input.appendTo(elem); - $button.appendTo(elem); - - if (attrs.linkText) { - $button.html(attrs.linkText); - } - - if (attrs.ngModel) { - $scope.$watch('model', function(newValue) { - _.each($scope.menuItems, function(item) { - _.each(item.submenu, function(subItem) { - if (subItem.value === newValue) { - $button.html(subItem.text); - } - }); - }); - }); - } - - var typeaheadValues = _.reduce($scope.menuItems, function(memo, value, index) { - _.each(value.submenu, function(item, subIndex) { - item.click = 'menuItemSelected(' + index + ',' + subIndex + ')'; - memo.push(value.text + ' ' + item.text); - }); - return memo; - }, []); - - $scope.menuItemSelected = function(index, subIndex) { - var item = $scope.menuItems[index]; - $scope.dropdownTypeaheadOnSelect({$item: item, $subItem: item.submenu[subIndex]}); - }; - - $input.attr('data-provide', 'typeahead'); - $input.typeahead({ - source: typeaheadValues, - minLength: 1, - items: 10, - updater: function (value) { - var result = {}; - _.each($scope.menuItems, function(menuItem) { - _.each(menuItem.submenu, function(submenuItem) { - if (value === (menuItem.text + ' ' + submenuItem.text)) { - result.$item = menuItem; - result.$subItem = submenuItem; - } - }); - }); - - if (result.$item) { - $scope.$apply(function() { - $scope.dropdownTypeaheadOnSelect(result); - }); - } - - $input.trigger('blur'); - return ''; - } - }); - - $button.click(function() { - $button.hide(); - $input.show(); - $input.focus(); - }); - - $input.keyup(function() { - elem.toggleClass('open', $input.val() === ''); - }); - - $input.blur(function() { - $input.hide(); - $input.val(''); - $button.show(); - $button.focus(); - // clicking the function dropdown menu wont - // work if you remove class at once - setTimeout(function() { - elem.removeClass('open'); - }, 200); - }); - - $compile(elem.contents())($scope); - } - }; - }); -}); diff --git a/public/app/directives/giveFocus.js b/public/app/directives/giveFocus.js deleted file mode 100644 index ef395d27fbd4e..0000000000000 --- a/public/app/directives/giveFocus.js +++ /dev/null @@ -1,26 +0,0 @@ -define([ - 'angular' -], -function (angular) { - 'use 
strict'; - - angular.module('grafana.directives').directive('giveFocus', function() { - return function(scope, element, attrs) { - element.click(function(e) { - e.stopPropagation(); - }); - - scope.$watch(attrs.giveFocus,function (newValue) { - if (!newValue) { - return; - } - setTimeout(function() { - element.focus(); - var pos = element.val().length * 2; - element[0].setSelectionRange(pos, pos); - }, 200); - },true); - }; - }); - -}); diff --git a/public/app/directives/grafanaSimplePanel.js b/public/app/directives/grafanaSimplePanel.js deleted file mode 100644 index ce34e2841e753..0000000000000 --- a/public/app/directives/grafanaSimplePanel.js +++ /dev/null @@ -1,66 +0,0 @@ -define([ - 'angular', -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('grafanaSimplePanel', function($compile) { - var panelLoading = '' + - ''+ - ' loading ...' + - ''+ - ''; - - return { - restrict: 'E', - link: function($scope, elem, attr) { - - // once we have the template, scan it for controllers and - // load the module.js if we have any - - // compile the module and uncloack. We're done - function loadModule($module) { - $module.appendTo(elem); - /* jshint indent:false */ - $compile(elem.contents())($scope); - elem.removeClass("ng-cloak"); - } - - function loadController(name) { - elem.addClass("ng-cloak"); - // load the panels module file, then render it in the dom. - var nameAsPath = name.replace(".", "/"); - $scope.require([ - 'jquery', - 'text!panels/'+nameAsPath+'/module.html' - ], function ($, moduleTemplate) { - var $module = $(moduleTemplate); - // top level controllers - var $controllers = $module.filter('ngcontroller, [ng-controller], .ng-controller'); - // add child controllers - $controllers = $controllers.add($module.find('ngcontroller, [ng-controller], .ng-controller')); - - if ($controllers.length) { - $controllers.first().prepend(panelLoading); - $scope.require([ - 'panels/'+nameAsPath+'/module' - ], function() { - loadModule($module); - }); - } else { - loadModule($module); - } - }); - } - - $scope.$watch(attr.type, function (name) { - loadController(name); - }); - - } - }; - }); - -}); diff --git a/public/app/directives/grafanaVersionCheck.js b/public/app/directives/grafanaVersionCheck.js deleted file mode 100644 index c98ce26938126..0000000000000 --- a/public/app/directives/grafanaVersionCheck.js +++ /dev/null @@ -1,33 +0,0 @@ -define([ - 'angular' -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('grafanaVersionCheck', function($http, contextSrv) { - return { - restrict: 'A', - link: function(scope, elem) { - if (contextSrv.version === 'master') { - return; - } - - $http({ method: 'GET', url: 'https://grafanarel.s3.amazonaws.com/latest.json' }) - .then(function(response) { - if (!response.data || !response.data.version) { - return; - } - - if (contextSrv.version !== response.data.version) { - elem.append(' ' + - ' ' + - 'New version available: ' + response.data.version + - ''); - } - }); - } - }; - }); -}); diff --git a/public/app/directives/metric.segment.js b/public/app/directives/metric.segment.js deleted file mode 100644 index 4202cfdc332f0..0000000000000 --- a/public/app/directives/metric.segment.js +++ /dev/null @@ -1,155 +0,0 @@ -define([ - 'angular', - 'app', - 'lodash', - 'jquery', -], -function (angular, app, _, $) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('metricSegment', function($compile, $sce) { - var inputTemplate = ''; - - var buttonTemplate = 
''; - - return { - scope: { - segment: "=", - getAltSegments: "&", - onValueChanged: "&" - }, - - link: function($scope, elem) { - var $input = $(inputTemplate); - var $button = $(buttonTemplate); - var segment = $scope.segment; - var options = null; - var cancelBlur = null; - - $input.appendTo(elem); - $button.appendTo(elem); - - $scope.updateVariableValue = function(value) { - if (value === '' || segment.value === value) { - return; - } - - $scope.$apply(function() { - var selected = _.findWhere($scope.altSegments, { value: value }); - if (selected) { - segment.value = selected.value; - segment.html = selected.html; - segment.fake = false; - segment.expandable = selected.expandable; - } - else { - segment.value = value; - segment.html = $sce.trustAsHtml(value); - segment.expandable = true; - segment.fake = false; - } - $scope.onValueChanged(); - }); - }; - - $scope.switchToLink = function(now) { - if (now === true || cancelBlur) { - clearTimeout(cancelBlur); - cancelBlur = null; - $input.hide(); - $button.show(); - $scope.updateVariableValue($input.val()); - } - else { - // need to have long delay because the blur - // happens long before the click event on the typeahead options - cancelBlur = setTimeout($scope.switchToLink, 50); - } - }; - - $scope.source = function(query, callback) { - if (options) { return options; } - - $scope.$apply(function() { - $scope.getAltSegments().then(function(altSegments) { - $scope.altSegments = altSegments; - options = _.map($scope.altSegments, function(alt) { return alt.value; }); - - // add custom values - if (!segment.fake && _.indexOf(options, segment.value) === -1) { - options.unshift(segment.value); - } - - callback(options); - }); - }); - }; - - $scope.updater = function(value) { - if (value === segment.value) { - clearTimeout(cancelBlur); - $input.focus(); - return value; - } - - $input.val(value); - $scope.switchToLink(true); - - return value; - }; - - $scope.matcher = function(item) { - var str = this.query; - if (str[0] === '/') { str = str.substring(1); } - if (str[str.length - 1] === '/') { str = str.substring(0, str.length-1); } - try { - return item.toLowerCase().match(str); - } catch(e) { - return false; - } - }; - - $input.attr('data-provide', 'typeahead'); - $input.typeahead({ source: $scope.source, minLength: 0, items: 10000, updater: $scope.updater, matcher: $scope.matcher }); - - var typeahead = $input.data('typeahead'); - typeahead.lookup = function () { - this.query = this.$element.val() || ''; - var items = this.source(this.query, $.proxy(this.process, this)); - return items ? 
this.process(items) : items; - }; - - $button.keydown(function(evt) { - // trigger typeahead on down arrow or enter key - if (evt.keyCode === 40 || evt.keyCode === 13) { - $button.click(); - } - }); - - $button.click(function() { - options = null; - $input.css('width', ($button.width() + 16) + 'px'); - - $button.hide(); - $input.show(); - $input.focus(); - - var typeahead = $input.data('typeahead'); - if (typeahead) { - $input.val(''); - typeahead.lookup(); - } - }); - - $input.blur($scope.switchToLink); - - $compile(elem.contents())($scope); - } - }; - }); -}); diff --git a/public/app/directives/misc.js b/public/app/directives/misc.js deleted file mode 100644 index 11c1334d55364..0000000000000 --- a/public/app/directives/misc.js +++ /dev/null @@ -1,130 +0,0 @@ -define([ - 'angular', - 'kbn' -], -function (angular, kbn) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('tip', function($compile) { - return { - restrict: 'E', - link: function(scope, elem, attrs) { - var _t = ''; - elem.replaceWith($compile(angular.element(_t))(scope)); - } - }; - }); - - angular - .module('grafana.directives') - .directive('watchChange', function() { - return { - scope: { onchange: '&watchChange' }, - link: function(scope, element) { - element.on('input', function() { - scope.$apply(function () { - scope.onchange({ inputValue: element.val() }); - }); - }); - } - }; - }); - - angular - .module('grafana.directives') - .directive('editorOptBool', function($compile) { - return { - restrict: 'E', - link: function(scope, elem, attrs) { - var ngchange = attrs.change ? (' ng-change="' + attrs.change + '"') : ''; - var tip = attrs.tip ? (' ' + attrs.tip + '') : ''; - var showIf = attrs.showIf ? (' ng-show="' + attrs.showIf + '" ') : ''; - - var template = '
    ' + - ' ' + - '' + - ' '; - elem.replaceWith($compile(angular.element(template))(scope)); - } - }; - }); - - angular - .module('grafana.directives') - .directive('editorCheckbox', function($compile, $interpolate) { - return { - restrict: 'E', - link: function(scope, elem, attrs) { - var text = $interpolate(attrs.text)(scope); - var ngchange = attrs.change ? (' ng-change="' + attrs.change + '"') : ''; - var tip = attrs.tip ? (' ' + attrs.tip + '') : ''; - var label = ''; - - var template = '' + - ' '; - - template = label + template; - elem.replaceWith($compile(angular.element(template))(scope)); - } - }; - }); - - angular - .module('grafana.directives') - .directive('gfDropdown', function ($parse, $compile, $timeout) { - - function buildTemplate(items, placement) { - var upclass = placement === 'top' ? 'dropup' : ''; - var ul = [ - '' - ]; - - angular.forEach(items, function (item, index) { - if (item.divider) { - return ul.splice(index + 1, 0, '
  • '); - } - - var li = '' + - '' + (item.text || '') + ''; - - if (item.submenu && item.submenu.length) { - li += buildTemplate(item.submenu).join('\n'); - } - - li += ''; - ul.splice(index + 1, 0, li); - }); - return ul; - } - - return { - restrict: 'EA', - scope: true, - link: function postLink(scope, iElement, iAttrs) { - var getter = $parse(iAttrs.gfDropdown), items = getter(scope); - $timeout(function () { - var placement = iElement.data('placement'); - var dropdown = angular.element(buildTemplate(items, placement).join('')); - dropdown.insertAfter(iElement); - $compile(iElement.next('ul.dropdown-menu'))(scope); - }); - - iElement.addClass('dropdown-toggle').attr('data-toggle', 'dropdown'); - } - }; - }); - -}); diff --git a/public/app/directives/ngBlur.js b/public/app/directives/ngBlur.js deleted file mode 100644 index aad125fe75522..0000000000000 --- a/public/app/directives/ngBlur.js +++ /dev/null @@ -1,20 +0,0 @@ -define([ - 'angular' -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('ngBlur', ['$parse', function($parse) { - return function(scope, element, attr) { - var fn = $parse(attr['ngBlur']); - element.bind('blur', function(event) { - scope.$apply(function() { - fn(scope, {$event:event}); - }); - }); - }; - }]); - -}); \ No newline at end of file diff --git a/public/app/directives/ngModelOnBlur.js b/public/app/directives/ngModelOnBlur.js deleted file mode 100644 index 8057c071bdb5c..0000000000000 --- a/public/app/directives/ngModelOnBlur.js +++ /dev/null @@ -1,54 +0,0 @@ -define([ - 'angular', - 'kbn' -], -function (angular, kbn) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('ngModelOnblur', function() { - return { - restrict: 'A', - priority: 1, - require: 'ngModel', - link: function(scope, elm, attr, ngModelCtrl) { - if (attr.type === 'radio' || attr.type === 'checkbox') { - return; - } - - elm.off('input keydown change'); - elm.bind('blur', function() { - scope.$apply(function() { - ngModelCtrl.$setViewValue(elm.val()); - }); - }); - } - }; - }) - .directive('emptyToNull', function () { - return { - restrict: 'A', - require: 'ngModel', - link: function (scope, elm, attrs, ctrl) { - ctrl.$parsers.push(function (viewValue) { - if(viewValue === "") { return null; } - return viewValue; - }); - } - }; - }) - .directive('validTimeSpan', function() { - return { - require: 'ngModel', - link: function(scope, elm, attrs, ctrl) { - ctrl.$validators.integer = function(modelValue, viewValue) { - if (ctrl.$isEmpty(modelValue)) { - return true; - } - return kbn.isValidTimeSpan(viewValue); - }; - } - }; - }); -}); diff --git a/public/app/directives/passwordStrenght.js b/public/app/directives/passwordStrenght.js deleted file mode 100644 index f75a8fe8854c3..0000000000000 --- a/public/app/directives/passwordStrenght.js +++ /dev/null @@ -1,47 +0,0 @@ -define([ - 'angular', -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('passwordStrength', function() { - var template = '
    ' + - '{{strengthText}}' + - '
    '; - return { - template: template, - scope: { - password: "=", - }, - link: function($scope) { - - $scope.strengthClass = ''; - - function passwordChanged(newValue) { - if (!newValue) { - $scope.strengthText = ""; - $scope.strengthClass = "hidden"; - return; - } - if (newValue.length < 4) { - $scope.strengthText = "strength: weak sauce."; - $scope.strengthClass = "password-strength-bad"; - return; - } - if (newValue.length <= 8) { - $scope.strengthText = "strength: you can do better."; - $scope.strengthClass = "password-strength-ok"; - return; - } - - $scope.strengthText = "strength: strong like a bull."; - $scope.strengthClass = "password-strength-good"; - } - - $scope.$watch("password", passwordChanged); - } - }; - }); -}); diff --git a/public/app/directives/spectrumPicker.js b/public/app/directives/spectrumPicker.js deleted file mode 100644 index c6d3de6f88458..0000000000000 --- a/public/app/directives/spectrumPicker.js +++ /dev/null @@ -1,42 +0,0 @@ -define([ - 'angular', - 'spectrum' -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('spectrumPicker', function() { - return { - restrict: 'E', - require: 'ngModel', - scope: false, - replace: true, - template: "", - link: function(scope, element, attrs, ngModel) { - var input = element.find('input'); - var options = angular.extend({ - showAlpha: true, - showButtons: false, - color: ngModel.$viewValue, - change: function(color) { - scope.$apply(function() { - ngModel.$setViewValue(color.toRgbString()); - }); - } - }, scope.$eval(attrs.options)); - - ngModel.$render = function() { - input.spectrum('set', ngModel.$viewValue || ''); - }; - - input.spectrum(options); - - scope.$on('$destroy', function() { - input.spectrum('destroy'); - }); - } - }; - }); -}); diff --git a/public/app/directives/topnav.js b/public/app/directives/topnav.js deleted file mode 100644 index 88b98d55984d2..0000000000000 --- a/public/app/directives/topnav.js +++ /dev/null @@ -1,53 +0,0 @@ -define([ - 'angular', - 'kbn' -], -function (angular) { - 'use strict'; - - angular - .module('grafana.directives') - .directive('topnav', function($rootScope, contextSrv) { - return { - restrict: 'E', - transclude: true, - scope: { - title: "@", - section: "@", - titleAction: "&", - subnav: "=", - }, - template: - '', - link: function(scope, elem, attrs) { - scope.icon = attrs.icon; - scope.contextSrv = contextSrv; - - scope.toggle = function() { - $rootScope.appEvent('toggle-sidemenu'); - }; - } - }; - }); - -}); diff --git a/public/app/directives/valueSelectDropdown.js b/public/app/directives/valueSelectDropdown.js deleted file mode 100644 index 4c0a25ad693b9..0000000000000 --- a/public/app/directives/valueSelectDropdown.js +++ /dev/null @@ -1,290 +0,0 @@ -define([ - 'angular', - 'app', - 'lodash', - 'jquery', -], -function (angular, app, _) { - 'use strict'; - - angular - .module('grafana.controllers') - .controller('ValueSelectDropdownCtrl', function($q) { - var vm = this; - - vm.show = function() { - vm.oldVariableText = vm.variable.current.text; - vm.highlightIndex = -1; - - vm.options = vm.variable.options; - vm.selectedValues = _.filter(vm.options, {selected: true}); - - vm.tags = _.map(vm.variable.tags, function(value) { - var tag = { text: value, selected: false }; - _.each(vm.variable.current.tags, function(tagObj) { - if (tagObj.text === value) { - tag = tagObj; - } - }); - return tag; - }); - - vm.search = { - query: '', - options: vm.options.slice(0, Math.min(vm.options.length, 1000)) - }; - - vm.dropdownVisible = 
true; - }; - - vm.updateLinkText = function() { - var current = vm.variable.current; - - if (current.tags && current.tags.length) { - // filer out values that are in selected tags - var selectedAndNotInTag = _.filter(vm.variable.options, function(option) { - if (!option.selected) { return false; } - for (var i = 0; i < current.tags.length; i++) { - var tag = current.tags[i]; - if (_.indexOf(tag.values, option.value) !== -1) { - return false; - } - } - return true; - }); - - // convert values to text - var currentTexts = _.pluck(selectedAndNotInTag, 'text'); - - // join texts - vm.linkText = currentTexts.join(' + '); - if (vm.linkText.length > 0) { - vm.linkText += ' + '; - } - } else { - vm.linkText = vm.variable.current.text; - } - }; - - vm.clearSelections = function() { - _.each(vm.options, function(option) { - option.selected = false; - }); - - vm.selectionsChanged(false); - }; - - vm.selectTag = function(tag) { - tag.selected = !tag.selected; - var tagValuesPromise; - if (!tag.values) { - tagValuesPromise = vm.getValuesForTag({tagKey: tag.text}); - } else { - tagValuesPromise = $q.when(tag.values); - } - - tagValuesPromise.then(function(values) { - tag.values = values; - tag.valuesText = values.join(' + '); - _.each(vm.options, function(option) { - if (_.indexOf(tag.values, option.value) !== -1) { - option.selected = tag.selected; - } - }); - - vm.selectionsChanged(false); - }); - }; - - vm.keyDown = function (evt) { - if (evt.keyCode === 27) { - vm.hide(); - } - if (evt.keyCode === 40) { - vm.moveHighlight(1); - } - if (evt.keyCode === 38) { - vm.moveHighlight(-1); - } - if (evt.keyCode === 13) { - if (vm.search.options.length === 0) { - vm.commitChanges(); - } else { - vm.selectValue(vm.search.options[vm.highlightIndex], {}, true, false); - } - } - if (evt.keyCode === 32) { - vm.selectValue(vm.search.options[vm.highlightIndex], {}, false, false); - } - }; - - vm.moveHighlight = function(direction) { - vm.highlightIndex = (vm.highlightIndex + direction) % vm.search.options.length; - }; - - vm.selectValue = function(option, event, commitChange, excludeOthers) { - if (!option) { return; } - - option.selected = !option.selected; - - commitChange = commitChange || false; - excludeOthers = excludeOthers || false; - - var setAllExceptCurrentTo = function(newValue) { - _.each(vm.options, function(other) { - if (option !== other) { other.selected = newValue; } - }); - }; - - // commit action (enter key), should not deselect it - if (commitChange) { - option.selected = true; - } - - if (option.text === 'All' || excludeOthers) { - setAllExceptCurrentTo(false); - commitChange = true; - } - else if (!vm.variable.multi) { - setAllExceptCurrentTo(false); - commitChange = true; - } else if (event.ctrlKey || event.metaKey || event.shiftKey) { - commitChange = true; - setAllExceptCurrentTo(false); - } - - vm.selectionsChanged(commitChange); - }; - - vm.selectionsChanged = function(commitChange) { - vm.selectedValues = _.filter(vm.options, {selected: true}); - - if (vm.selectedValues.length > 1 && vm.selectedValues.length !== vm.options.length) { - if (vm.selectedValues[0].text === 'All') { - vm.selectedValues[0].selected = false; - vm.selectedValues = vm.selectedValues.slice(1, vm.selectedValues.length); - } - } - - // validate selected tags - _.each(vm.tags, function(tag) { - if (tag.selected) { - _.each(tag.values, function(value) { - if (!_.findWhere(vm.selectedValues, {value: value})) { - tag.selected = false; - } - }); - } - }); - - vm.selectedTags = _.filter(vm.tags, {selected: true}); - 
vm.variable.current.value = _.pluck(vm.selectedValues, 'value'); - vm.variable.current.text = _.pluck(vm.selectedValues, 'text').join(' + '); - vm.variable.current.tags = vm.selectedTags; - - // only single value - if (vm.selectedValues.length === 1) { - vm.variable.current.value = vm.selectedValues[0].value; - } - - if (commitChange) { - vm.commitChanges(); - } - }; - - vm.commitChanges = function() { - // if we have a search query and no options use that - if (vm.search.options.length === 0 && vm.search.query.length > 0) { - vm.variable.current = {text: vm.search.query, value: vm.search.query}; - } - else if (vm.selectedValues.length === 0) { - // make sure one option is selected - vm.options[0].selected = true; - vm.selectionsChanged(false); - } - - vm.dropdownVisible = false; - vm.updateLinkText(); - - if (vm.variable.current.text !== vm.oldVariableText) { - vm.onUpdated(); - } - }; - - vm.queryChanged = function() { - vm.highlightIndex = -1; - vm.search.options = _.filter(vm.options, function(option) { - return option.text.toLowerCase().indexOf(vm.search.query.toLowerCase()) !== -1; - }); - - vm.search.options = vm.search.options.slice(0, Math.min(vm.search.options.length, 1000)); - }; - - vm.init = function() { - vm.selectedTags = vm.variable.current.tags || []; - vm.updateLinkText(); - }; - - }); - - angular - .module('grafana.directives') - .directive('valueSelectDropdown', function($compile, $window, $timeout, $rootScope) { - - return { - scope: { variable: "=", onUpdated: "&", getValuesForTag: "&" }, - templateUrl: 'app/partials/valueSelectDropdown.html', - controller: 'ValueSelectDropdownCtrl', - controllerAs: 'vm', - bindToController: true, - link: function(scope, elem) { - var bodyEl = angular.element($window.document.body); - var linkEl = elem.find('.variable-value-link'); - var inputEl = elem.find('input'); - - function openDropdown() { - inputEl.css('width', Math.max(linkEl.width(), 30) + 'px'); - - inputEl.show(); - linkEl.hide(); - - inputEl.focus(); - $timeout(function() { bodyEl.on('click', bodyOnClick); }, 0, false); - } - - function switchToLink() { - inputEl.hide(); - linkEl.show(); - bodyEl.off('click', bodyOnClick); - } - - function bodyOnClick (e) { - if (elem.has(e.target).length === 0) { - scope.$apply(function() { - scope.vm.commitChanges(); - }); - } - } - - scope.$watch('vm.dropdownVisible', function(newValue) { - if (newValue) { - openDropdown(); - } else { - switchToLink(); - } - }); - - var cleanUp = $rootScope.$on('template-variable-value-updated', function() { - scope.vm.updateLinkText(); - }); - - scope.$on("$destroy", function() { - cleanUp(); - }); - - scope.vm.init(); - }, - }; - }); - -}); diff --git a/public/app/features/admin/adminEditOrgCtrl.js b/public/app/features/admin/adminEditOrgCtrl.js new file mode 100644 index 0000000000000..ec500704fb3e6 --- /dev/null +++ b/public/app/features/admin/adminEditOrgCtrl.js @@ -0,0 +1,52 @@ +define([ + 'angular', +], +function (angular) { + 'use strict'; + + var module = angular.module('grafana.controllers'); + + module.controller('AdminEditOrgCtrl', function($scope, $routeParams, backendSrv, $location) { + + $scope.init = function() { + if ($routeParams.id) { + $scope.getOrg($routeParams.id); + $scope.getOrgUsers($routeParams.id); + } + }; + + $scope.getOrg = function(id) { + backendSrv.get('/api/orgs/' + id).then(function(org) { + $scope.org = org; + }); + }; + + $scope.getOrgUsers = function(id) { + backendSrv.get('/api/orgs/' + id + '/users').then(function(orgUsers) { + $scope.orgUsers = orgUsers; + }); + 
}; + + $scope.update = function() { + if (!$scope.orgDetailsForm.$valid) { return; } + + backendSrv.put('/api/orgs/' + $scope.org.id, $scope.org).then(function() { + $location.path('/admin/orgs'); + }); + }; + + $scope.updateOrgUser= function(orgUser) { + backendSrv.patch('/api/orgs/' + orgUser.orgId + '/users/' + orgUser.userId, orgUser); + }; + + $scope.removeOrgUser = function(orgUser) { + backendSrv.delete('/api/orgs/' + orgUser.orgId + '/users/' + orgUser.userId).then(function() { + $scope.getOrgUsers($scope.org.id); + }); + }; + + $scope.init(); + + }); + +}); diff --git a/public/app/features/admin/adminListOrgsCtrl.js b/public/app/features/admin/adminListOrgsCtrl.js new file mode 100644 index 0000000000000..c3f727191fcfc --- /dev/null +++ b/public/app/features/admin/adminListOrgsCtrl.js @@ -0,0 +1,39 @@ +define([ + 'angular', +], +function (angular) { + 'use strict'; + + var module = angular.module('grafana.controllers'); + + module.controller('AdminListOrgsCtrl', function($scope, backendSrv) { + + $scope.init = function() { + $scope.getOrgs(); + }; + + $scope.getOrgs = function() { + backendSrv.get('/api/orgs').then(function(orgs) { + $scope.orgs = orgs; + }); + }; + + $scope.deleteOrg = function(org) { + $scope.appEvent('confirm-modal', { + title: 'Do you want to delete organization ' + org.name + '?', + text: 'All dashboards for this organization will be removed!', + icon: 'fa-trash', + yesText: 'Delete', + onConfirm: function() { + backendSrv.delete('/api/orgs/' + org.id).then(function() { + $scope.getOrgs(); + }); + } + }); + }; + + $scope.init(); + + }); + +}); diff --git a/public/app/features/admin/adminUsersCtrl.js b/public/app/features/admin/adminListUsersCtrl.js similarity index 90% rename from public/app/features/admin/adminUsersCtrl.js rename to public/app/features/admin/adminListUsersCtrl.js index 737812c547415..c89deafaf0f66 100644 --- a/public/app/features/admin/adminUsersCtrl.js +++ b/public/app/features/admin/adminListUsersCtrl.js @@ -6,7 +6,7 @@ function (angular) { var module = angular.module('grafana.controllers'); - module.controller('AdminUsersCtrl', function($scope, backendSrv) { + module.controller('AdminListUsersCtrl', function($scope, backendSrv) { $scope.init = function() { $scope.getUsers(); diff --git a/public/app/features/admin/all.js b/public/app/features/admin/all.js index 35f1f4f5db9d6..14bff249b0e20 100644 --- a/public/app/features/admin/all.js +++ b/public/app/features/admin/all.js @@ -1,5 +1,7 @@ define([ - './adminUsersCtrl', + './adminListUsersCtrl', + './adminListOrgsCtrl', + './adminEditOrgCtrl', './adminEditUserCtrl', './adminSettingsCtrl', ], function () {}); diff --git a/public/app/features/admin/partials/edit_org.html b/public/app/features/admin/partials/edit_org.html new file mode 100644 index 0000000000000..aef042894ab36 --- /dev/null +++ b/public/app/features/admin/partials/edit_org.html @@ -0,0 +1,60 @@ + + + + +
    +
    +

    + Organization Details +

    + +
    +
    +
    +
      +
    • + Name +
    • +
    • + +
    • +
    +
    +
    +
    + +
    + +
    + +

    + Organization Users +

    + + + + + + + + + + + + + + +
Username Email Role
    {{orgUser.login}}{{orgUser.email}} + + + + + +
    + +
    +
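The edit_org.html partial above is rendered by the new AdminEditOrgCtrl, which talks to the org admin HTTP API through backendSrv. A minimal sketch of the same calls, assuming only the promise-returning get/put/patch/delete helpers the controller itself uses; the OrgUser shape is an illustrative assumption, not part of the source:

```ts
// Sketch only (not Grafana source): the org admin endpoints used by AdminEditOrgCtrl above.
// backendSrv is assumed to expose the promise-returning get/put/patch/delete helpers
// seen in the controller; the OrgUser shape is an illustrative assumption.
interface OrgUser { orgId: number; userId: number; login: string; email: string; role: string; }

function renameOrg(backendSrv: any, orgId: number, name: string) {
  return backendSrv.get('/api/orgs/' + orgId).then((org: any) => {
    org.name = name;
    return backendSrv.put('/api/orgs/' + orgId, org); // same PUT the Update button issues
  });
}

function removeOrgUser(backendSrv: any, user: OrgUser) {
  // DELETE a single membership, then re-fetch the member list
  return backendSrv.delete('/api/orgs/' + user.orgId + '/users/' + user.userId)
    .then(() => backendSrv.get('/api/orgs/' + user.orgId + '/users'));
}
```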
    diff --git a/public/app/features/admin/partials/edit_user.html b/public/app/features/admin/partials/edit_user.html index a1a4cb989cded..674f1cf4b0ddc 100644 --- a/public/app/features/admin/partials/edit_user.html +++ b/public/app/features/admin/partials/edit_user.html @@ -9,7 +9,7 @@

    - User details + Edit User

    @@ -17,7 +17,7 @@

    • - Name + Name
    • @@ -28,7 +28,7 @@

      • - Email + Email
      • @@ -39,7 +39,7 @@

        • - Username + Username
        • @@ -53,16 +53,16 @@

          -

          +

          Change password -

          +

        • - New password + New password
        • @@ -76,9 +76,9 @@

          -

          +

          Permissions -

          +

      @@ -97,9 +97,9 @@


      -

      +

      Organizations -

      +

    diff --git a/public/app/features/admin/partials/orgs.html b/public/app/features/admin/partials/orgs.html index 1bb8996a825c0..573c0799c68fc 100644 --- a/public/app/features/admin/partials/orgs.html +++ b/public/app/features/admin/partials/orgs.html @@ -1,6 +1,6 @@ @@ -10,6 +10,27 @@

    Organizations

    - View not implemented yet... + + + + + + + + + + + +
Id Name
    {{org.id}}{{org.name}} + + + Edit + +    + + + +
    +
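Deleting an organization from this list goes through the generic confirm-modal app event shown in AdminListOrgsCtrl above. A sketch of that pattern, assuming a scope that exposes the appEvent helper and the same payload fields (title, text, icon, yesText, onConfirm):

```ts
// Sketch only: the confirm-before-delete pattern used by AdminListOrgsCtrl above.
// $scope.appEvent and the payload fields come from the controller; the rest is illustrative.
function confirmDeleteOrg($scope: any, backendSrv: any,
                          org: { id: number; name: string }, reload: () => void) {
  $scope.appEvent('confirm-modal', {
    title: 'Do you want to delete organization ' + org.name + '?',
    text: 'All dashboards for this organization will be removed!',
    icon: 'fa-trash',
    yesText: 'Delete',
    onConfirm: () => {
      // the modal only runs this callback after the user confirms
      backendSrv.delete('/api/orgs/' + org.id).then(reload);
    },
  });
}
```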
    diff --git a/public/app/features/admin/partials/users.html b/public/app/features/admin/partials/users.html index 6d4f7a8671c44..6f8365e75dd77 100644 --- a/public/app/features/admin/partials/users.html +++ b/public/app/features/admin/partials/users.html @@ -1,6 +1,6 @@ diff --git a/public/app/features/all.js b/public/app/features/all.js index 92db77a9a5d08..6519d112b74ca 100644 --- a/public/app/features/all.js +++ b/public/app/features/all.js @@ -7,6 +7,6 @@ define([ './panel/all', './profile/profileCtrl', './profile/changePasswordCtrl', - './org/all', + './profile/selectOrgCtrl', './admin/all', ], function () {}); diff --git a/public/app/features/annotations/annotationsSrv.js b/public/app/features/annotations/annotationsSrv.js index a4529de201918..f131ad2839329 100644 --- a/public/app/features/annotations/annotationsSrv.js +++ b/public/app/features/annotations/annotationsSrv.js @@ -7,14 +7,14 @@ define([ var module = angular.module('grafana.services'); - module.service('annotationsSrv', function(datasourceSrv, $q, alertSrv, $rootScope) { + module.service('annotationsSrv', function($rootScope, $q, datasourceSrv, alertSrv, timeSrv) { var promiseCached; var list = []; var self = this; this.init = function() { - $rootScope.onAppEvent('refresh', this.clearCache); - $rootScope.onAppEvent('setup-dashboard', this.clearCache); + $rootScope.onAppEvent('refresh', this.clearCache, $rootScope); + $rootScope.onAppEvent('dashboard-loaded', this.clearCache, $rootScope); }; this.clearCache = function() { @@ -22,7 +22,7 @@ define([ list = []; }; - this.getAnnotations = function(rangeUnparsed, dashboard) { + this.getAnnotations = function(dashboard) { if (dashboard.annotations.list.length === 0) { return $q.when(null); } @@ -34,9 +34,13 @@ define([ self.dashboard = dashboard; var annotations = _.where(dashboard.annotations.list, {enable: true}); + var range = timeSrv.timeRange(); + var rangeRaw = timeSrv.timeRange(false); + var promises = _.map(annotations, function(annotation) { return datasourceSrv.get(annotation.datasource).then(function(datasource) { - return datasource.annotationQuery(annotation, rangeUnparsed) + var query = {range: range, rangeRaw: rangeRaw, annotation: annotation}; + return datasource.annotationQuery(query) .then(self.receiveAnnotationResults) .then(null, errorHandler); }, this); diff --git a/public/app/features/annotations/editorCtrl.js b/public/app/features/annotations/editorCtrl.js index 39b072263fcd9..5a509fe4ca906 100644 --- a/public/app/features/annotations/editorCtrl.js +++ b/public/app/features/annotations/editorCtrl.js @@ -20,15 +20,13 @@ function (angular, _, $) { }; $scope.init = function() { - $scope.editor = { index: 0 }; + $scope.mode = 'list'; $scope.datasources = datasourceSrv.getAnnotationSources(); $scope.annotations = $scope.dashboard.annotations.list; $scope.reset(); - $scope.$watch('editor.index', function(newVal) { - if (newVal !== 2) { - $scope.reset(); - } + $scope.$watch('mode', function(newVal) { + if (newVal === 'new') { $scope.reset(); } }); }; @@ -43,8 +41,8 @@ function (angular, _, $) { $scope.currentAnnotation = annotation; $scope.currentIsNew = false; $scope.datasourceChanged(); + $scope.mode = 'edit'; - $scope.editor.index = 2; $(".tooltip.in").remove(); }; @@ -57,14 +55,14 @@ function (angular, _, $) { $scope.update = function() { $scope.reset(); - $scope.editor.index = 0; + $scope.mode = 'list'; $scope.broadcastRefresh(); }; $scope.add = function() { $scope.annotations.push($scope.currentAnnotation); $scope.reset(); - $scope.editor.index = 0; + 
$scope.mode = 'list'; $scope.updateSubmenuVisibility(); $scope.broadcastRefresh(); }; diff --git a/public/app/features/annotations/partials/editor.html b/public/app/features/annotations/partials/editor.html index 799b1f69fe42a..851db4b4f01a1 100644 --- a/public/app/features/annotations/partials/editor.html +++ b/public/app/features/annotations/partials/editor.html @@ -6,9 +6,27 @@ Annotations
    -
    -
    -
    +
    -
    + +
    No annotations defined @@ -47,7 +66,7 @@
    -
    +
    @@ -72,15 +91,13 @@
    -
    -
    +
    - - + +

    -
    diff --git a/public/app/features/dashboard/all.js b/public/app/features/dashboard/all.js index d6e713070e9c7..0ee3a64a80662 100644 --- a/public/app/features/dashboard/all.js +++ b/public/app/features/dashboard/all.js @@ -16,6 +16,7 @@ define([ './timeSrv', './unsavedChangesSrv', './directives/dashSearchView', + './timepicker/timepicker', './graphiteImportCtrl', './dynamicDashboardSrv', './importCtrl', diff --git a/public/app/features/dashboard/dashboardCtrl.js b/public/app/features/dashboard/dashboardCtrl.js index f25fea26afe5e..cf0715655150d 100644 --- a/public/app/features/dashboard/dashboardCtrl.js +++ b/public/app/features/dashboard/dashboardCtrl.js @@ -1,8 +1,7 @@ define([ 'angular', 'jquery', - 'config', - 'lodash', + 'app/core/config', ], function (angular, $, config) { "use strict"; @@ -29,7 +28,7 @@ function (angular, $, config) { var resizeEventTimeout; this.init = function(dashboard) { - $scope.reset_row(); + $scope.resetRow(); $scope.registerWindowResizeEvent(); $scope.onAppEvent('show-json-editor', $scope.showJsonEditor); $scope.setupDashboard(dashboard); @@ -64,7 +63,7 @@ function (angular, $, config) { $scope.appEvent("dashboard-loaded", $scope.dashboard); }).catch(function(err) { - console.log('Failed to initialize dashboard', err); + if (err.data && err.data.message) { err.message = err.data.message; } $scope.appEvent("alert-error", ['Dashboard init failed', 'Template variables could not be initialized: ' + err.message]); }); }; @@ -88,17 +87,17 @@ function (angular, $, config) { $rootScope.$broadcast('refresh'); }; - $scope.add_row = function(dash, row) { + $scope.addRow = function(dash, row) { dash.rows.push(row); }; - $scope.add_row_default = function() { - $scope.reset_row(); + $scope.addRowDefault = function() { + $scope.resetRow(); $scope.row.title = 'New row'; - $scope.add_row($scope.dashboard, $scope.row); + $scope.addRow($scope.dashboard, $scope.row); }; - $scope.reset_row = function() { + $scope.resetRow = function() { $scope.row = { title: '', height: '250px', diff --git a/public/app/features/dashboard/dashboardLoaderSrv.js b/public/app/features/dashboard/dashboardLoaderSrv.js index f2fc3c9c03f45..f578a9d107513 100644 --- a/public/app/features/dashboard/dashboardLoaderSrv.js +++ b/public/app/features/dashboard/dashboardLoaderSrv.js @@ -3,9 +3,10 @@ define([ 'moment', 'lodash', 'jquery', - 'kbn', + 'app/core/utils/kbn', + 'app/core/utils/datemath', ], -function (angular, moment, _, $, kbn) { +function (angular, moment, _, $, kbn, dateMath) { 'use strict'; var module = angular.module('grafana.services'); @@ -59,8 +60,8 @@ function (angular, moment, _, $, kbn) { }; /*jshint -W054 */ - var script_func = new Function('ARGS','kbn','_','moment','window','document','$','jQuery', 'services', result.data); - var script_result = script_func($routeParams, kbn, _ , moment, window, document, $, $, services); + var script_func = new Function('ARGS','kbn','dateMath','_','moment','window','document','$','jQuery', 'services', result.data); + var script_result = script_func($routeParams, kbn, dateMath, _ , moment, window, document, $, $, services); // Handle async dashboard scripts if (_.isFunction(script_result)) { diff --git a/public/app/features/dashboard/dashboardNavCtrl.js b/public/app/features/dashboard/dashboardNavCtrl.js index 27ce528f69e7d..56a64c4933f7c 100644 --- a/public/app/features/dashboard/dashboardNavCtrl.js +++ b/public/app/features/dashboard/dashboardNavCtrl.js @@ -1,9 +1,7 @@ define([ 'angular', 'lodash', - 'config', - 'store', - 'filesaver' + 
'vendor/filesaver' ], function (angular, _) { 'use strict'; @@ -46,9 +44,9 @@ function (angular, _) { $scope.appEvent('show-dash-search'); }; - $scope.dashboardTitleAction = function() { - $scope.appEvent('hide-dash-editor'); - $scope.exitFullscreen(); + $scope.hideTooltip = function(evt) { + angular.element(evt.currentTarget).tooltip('hide'); + $scope.appEvent('hide-dash-search'); }; $scope.saveDashboard = function(options) { diff --git a/public/app/features/dashboard/dashboardSrv.js b/public/app/features/dashboard/dashboardSrv.js index 9ddce2dbea875..73919ce0e017a 100644 --- a/public/app/features/dashboard/dashboardSrv.js +++ b/public/app/features/dashboard/dashboardSrv.js @@ -1,11 +1,10 @@ define([ 'angular', 'jquery', - 'kbn', 'lodash', 'moment', ], -function (angular, $, kbn, _, moment) { +function (angular, $, _, moment) { 'use strict'; var module = angular.module('grafana.services'); @@ -27,12 +26,12 @@ function (angular, $, kbn, _, moment) { this.tags = data.tags || []; this.style = data.style || "dark"; this.timezone = data.timezone || 'browser'; - this.editable = data.editable === false ? false : true; + this.editable = data.editable !== false; this.hideControls = data.hideControls || false; this.sharedCrosshair = data.sharedCrosshair || false; this.rows = data.rows || []; - this.nav = data.nav || []; this.time = data.time || { from: 'now-6h', to: 'now' }; + this.timepicker = data.timepicker || {}; this.templating = this._ensureListExist(data.templating); this.annotations = this._ensureListExist(data.annotations); this.refresh = data.refresh; @@ -40,11 +39,6 @@ function (angular, $, kbn, _, moment) { this.schemaVersion = data.schemaVersion || 0; this.version = data.version || 0; this.links = data.links || []; - - if (this.nav.length === 0) { - this.nav.push({ type: 'timepicker' }); - } - this._updateSchema(data); this._initMeta(meta); } @@ -54,10 +48,10 @@ function (angular, $, kbn, _, moment) { p._initMeta = function(meta) { meta = meta || {}; - meta.canShare = meta.canShare === false ? false : true; - meta.canSave = meta.canSave === false ? false : true; - meta.canStar = meta.canStar === false ? false : true; - meta.canEdit = meta.canEdit === false ? 
false : true; + meta.canShare = meta.canShare !== false; + meta.canSave = meta.canSave !== false; + meta.canStar = meta.canStar !== false; + meta.canEdit = meta.canEdit !== false; if (!this.editable) { meta.canEdit = false; @@ -123,7 +117,7 @@ function (angular, $, kbn, _, moment) { },0); }; - p.add_panel = function(panel, row) { + p.addPanel = function(panel, row) { var rowSpan = this.rowSpan(row); var panelCount = row.panels.length; var space = (12 - rowSpan) - panel.span; @@ -157,7 +151,6 @@ function (angular, $, kbn, _, moment) { result.panel = panel; result.row = row; result.index = index; - return; } }); }); @@ -174,26 +167,71 @@ function (angular, $, kbn, _, moment) { var newPanel = angular.copy(panel); newPanel.id = this.getNextPanelId(); + delete newPanel.repeat; + delete newPanel.repeatIteration; + delete newPanel.repeatPanelId; + delete newPanel.scopedVars; + var currentRow = this.rows[rowIndex]; currentRow.panels.push(newPanel); return newPanel; }; + p.getNextQueryLetter = function(panel) { + var letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'; + + return _.find(letters, function(refId) { + return _.every(panel.targets, function(other) { + return other.refId !== refId; + }); + }); + }; + + p.addDataQueryTo = function(panel, datasource) { + var target = { + refId: this.getNextQueryLetter(panel) + }; + + if (datasource) { + target.datasource = datasource.name; + } + + panel.targets.push(target); + }; + + p.removeDataQuery = function (panel, query) { + panel.targets = _.without(panel.targets, query); + }; + + p.duplicateDataQuery = function(panel, query) { + var clone = angular.copy(query); + clone.refId = this.getNextQueryLetter(panel); + panel.targets.push(clone); + }; + + p.moveDataQuery = function(panel, fromIndex, toIndex) { + _.move(panel.targets, fromIndex, toIndex); + }; + p.formatDate = function(date, format) { + if (!moment.isMoment(date)) { + date = moment(date); + } + format = format || 'YYYY-MM-DD HH:mm:ss'; return this.timezone === 'browser' ? 
- moment(date).format(format) : - moment.utc(date).format(format); + moment(date).format(format) : + moment.utc(date).format(format); }; p._updateSchema = function(old) { var i, j, k; var oldVersion = this.schemaVersion; var panelUpgrades = []; - this.schemaVersion = 6; + this.schemaVersion = 8; - if (oldVersion === 6) { + if (oldVersion === 8) { return; } @@ -288,6 +326,65 @@ function (angular, $, kbn, _, moment) { } } + if (oldVersion < 7) { + if (old.nav && old.nav.length) { + this.timepicker = old.nav[0]; + delete this.nav; + } + + // ensure query refIds + panelUpgrades.push(function(panel) { + _.each(panel.targets, function(target) { + if (!target.refId) { + target.refId = this.getNextQueryLetter(panel); + } + }, this); + }); + } + + if (oldVersion < 8) { + panelUpgrades.push(function(panel) { + _.each(panel.targets, function(target) { + // update old influxdb query schema + if (target.fields && target.tags && target.groupBy) { + if (target.rawQuery) { + delete target.fields; + delete target.fill; + } else { + target.select = _.map(target.fields, function(field) { + var parts = []; + parts.push({type: 'field', params: [field.name]}); + parts.push({type: field.func, params: []}); + if (field.mathExpr) { + parts.push({type: 'math', params: [field.mathExpr]}); + } + if (field.asExpr) { + parts.push({type: 'alias', params: [field.asExpr]}); + } + return parts; + }); + delete target.fields; + _.each(target.groupBy, function(part) { + if (part.type === 'time' && part.interval) { + part.params = [part.interval]; + delete part.interval; + } + if (part.type === 'tag' && part.key) { + part.params = [part.key]; + delete part.key; + } + }); + + if (target.fill) { + target.groupBy.push({type: 'fill', params: [target.fill]}); + delete target.fill; + } + } + } + }); + }); + } + if (panelUpgrades.length === 0) { return; } @@ -296,7 +393,7 @@ function (angular, $, kbn, _, moment) { var row = this.rows[i]; for (j = 0; j < row.panels.length; j++) { for (k = 0; k < panelUpgrades.length; k++) { - panelUpgrades[k](row.panels[j]); + panelUpgrades[k].call(this, row.panels[j]); } } } diff --git a/public/app/features/dashboard/directives/dashSearchView.js b/public/app/features/dashboard/directives/dashSearchView.js index ad73b84043ca9..a274568a03169 100644 --- a/public/app/features/dashboard/directives/dashSearchView.js +++ b/public/app/features/dashboard/directives/dashSearchView.js @@ -54,7 +54,14 @@ function (angular, $) { hookUpHideWhenClickedOutside(); } + function hideSearch() { + if (editorScope) { + editorScope.dismiss(); + } + } + scope.onAppEvent('show-dash-search', showSearch); + scope.onAppEvent('hide-dash-search', hideSearch); } }; }); diff --git a/public/app/features/dashboard/graphiteImportCtrl.js b/public/app/features/dashboard/graphiteImportCtrl.js index be912610e3615..5333888327471 100644 --- a/public/app/features/dashboard/graphiteImportCtrl.js +++ b/public/app/features/dashboard/graphiteImportCtrl.js @@ -1,10 +1,9 @@ define([ 'angular', - 'app', 'lodash', - 'kbn' + 'app/core/utils/kbn' ], -function (angular, app, _, kbn) { +function (angular, _, kbn) { 'use strict'; var module = angular.module('grafana.controllers'); @@ -90,7 +89,7 @@ function (angular, app, _, kbn) { }); window.grafanaImportDashboard = newDashboard; - $location.path('/dashboard/import/' + kbn.slugifyForUrl(newDashboard.title)); + $location.path('/dashboard-import/' + kbn.slugifyForUrl(newDashboard.title)); } }); }); diff --git a/public/app/features/dashboard/keybindings.js b/public/app/features/dashboard/keybindings.js 
index b417382bc4505..2219cab2d4cc4 100644 --- a/public/app/features/dashboard/keybindings.js +++ b/public/app/features/dashboard/keybindings.js @@ -33,7 +33,7 @@ function(angular, $) { }, { inputDisabled: true }); - keyboardManager.bind('ctrl+f', function() { + keyboardManager.bind('f', function() { scope.appEvent('show-dash-search'); }, { inputDisabled: true }); diff --git a/public/app/features/dashboard/partials/dashboardTopNav.html b/public/app/features/dashboard/partials/dashboardTopNav.html index 58445abf5054d..8fa6da1ee2f04 100644 --- a/public/app/features/dashboard/partials/dashboardTopNav.html +++ b/public/app/features/dashboard/partials/dashboardTopNav.html @@ -30,7 +30,7 @@
    diff --git a/public/app/features/dashboard/partials/graphiteImport.html b/public/app/features/dashboard/partials/graphiteImport.html index 343b5d52e412e..9c351346fe652 100644 --- a/public/app/features/dashboard/partials/graphiteImport.html +++ b/public/app/features/dashboard/partials/graphiteImport.html @@ -25,7 +25,7 @@

    Load dashboard from Graphite-Web

    diff --git a/public/app/features/dashboard/partials/import.html b/public/app/features/dashboard/partials/import.html index a942297ac5925..4b44544dc7b74 100644 --- a/public/app/features/dashboard/partials/import.html +++ b/public/app/features/dashboard/partials/import.html @@ -1,6 +1,6 @@ diff --git a/public/app/features/dashboard/partials/saveDashboardAs.html b/public/app/features/dashboard/partials/saveDashboardAs.html index 8d04280465e8e..5c069f199e6ce 100644 --- a/public/app/features/dashboard/partials/saveDashboardAs.html +++ b/public/app/features/dashboard/partials/saveDashboardAs.html @@ -14,7 +14,7 @@

    New title

    - +

    diff --git a/public/app/features/dashboard/partials/settings.html b/public/app/features/dashboard/partials/settings.html index 5ddfb1c57e3a6..541ef45e9abd7 100644 --- a/public/app/features/dashboard/partials/settings.html +++ b/public/app/features/dashboard/partials/settings.html @@ -5,9 +5,7 @@
    -
    -
    -
    +
    @@ -40,7 +38,7 @@
    Dashboard info
    -
    +
    • Timezone @@ -57,7 +55,7 @@
      Dashboard info
      Toggles
      -
      +
      • @@ -65,7 +63,7 @@
        Toggles
      • -
      • +
      @@ -79,28 +77,30 @@
      Toggles
      Rows settings
      -
      -
        -
      • - Title -
      • -
      • - -
      • -
      • - -
      • -
      • - -
      • -
      • - -
      • -
      • - -
      • -
      -
      +
      +
      +
        +
      • + Title +
      • +
      • + +
      • +
      • + +
      • +
      • + +
      • +
      • + +
      • +
      • + +
      • +
      +
      +
      @@ -110,9 +110,8 @@
      Rows settings
      -
      - - +
      +
      diff --git a/public/app/features/dashboard/partials/snapshotTopNav.html b/public/app/features/dashboard/partials/snapshotTopNav.html index 68e8b9532d66c..57f5d9181d4c8 100644 --- a/public/app/features/dashboard/partials/snapshotTopNav.html +++ b/public/app/features/dashboard/partials/snapshotTopNav.html @@ -26,9 +26,8 @@
    diff --git a/public/app/features/dashboard/playlistCtrl.js b/public/app/features/dashboard/playlistCtrl.js index b5d04374e9a63..9242905405ab5 100644 --- a/public/app/features/dashboard/playlistCtrl.js +++ b/public/app/features/dashboard/playlistCtrl.js @@ -1,7 +1,7 @@ define([ 'angular', 'lodash', - 'config' + 'app/core/config' ], function (angular, _, config) { 'use strict'; diff --git a/public/app/features/dashboard/playlistSrv.js b/public/app/features/dashboard/playlistSrv.js index 9997581fbc342..bf4587f6c48ad 100644 --- a/public/app/features/dashboard/playlistSrv.js +++ b/public/app/features/dashboard/playlistSrv.js @@ -1,8 +1,7 @@ define([ 'angular', 'lodash', - 'kbn', - 'store' + 'app/core/utils/kbn', ], function (angular, _, kbn) { 'use strict'; diff --git a/public/app/features/dashboard/rowCtrl.js b/public/app/features/dashboard/rowCtrl.js index c63017365bb73..723481d2d65de 100644 --- a/public/app/features/dashboard/rowCtrl.js +++ b/public/app/features/dashboard/rowCtrl.js @@ -1,10 +1,9 @@ define([ 'angular', - 'app', 'lodash', - 'config' + 'app/core/config' ], -function (angular, app, _, config) { +function (angular, _, config) { 'use strict'; var module = angular.module('grafana.controllers'); @@ -29,7 +28,7 @@ function (angular, app, _, config) { $scope.panelMenuPos = posX; }; - $scope.toggle_row = function(row) { + $scope.toggleRow = function(row) { row.collapse = row.collapse ? false : true; if (!row.collapse) { $timeout(function() { @@ -38,31 +37,52 @@ function (angular, app, _, config) { } }; - $scope.add_panel = function(panel) { - $scope.dashboard.add_panel(panel, $scope.row); + $scope.addPanel = function(panel) { + $scope.dashboard.addPanel(panel, $scope.row); }; - $scope.delete_row = function() { + $scope.deleteRow = function() { $scope.appEvent('confirm-modal', { title: 'Are you sure you want to delete this row?', icon: 'fa-trash', - yesText: 'delete', + yesText: 'Delete', onConfirm: function() { $scope.dashboard.rows = _.without($scope.dashboard.rows, $scope.row); } }); }; - $scope.move_row = function(direction) { + $scope.moveRow = function(direction) { var rowsList = $scope.dashboard.rows; var rowIndex = _.indexOf(rowsList, $scope.row); - var newIndex = rowIndex + direction; + var newIndex = rowIndex; + switch(direction) { + case 'up': { + newIndex = rowIndex - 1; + break; + } + case 'down': { + newIndex = rowIndex + 1; + break; + } + case 'top': { + newIndex = 0; + break; + } + case 'bottom': { + newIndex = rowsList.length - 1; + break; + } + default: { + newIndex = rowIndex; + } + } if (newIndex >= 0 && newIndex <= (rowsList.length - 1)) { - _.move(rowsList, rowIndex, rowIndex + direction); + _.move(rowsList, rowIndex, newIndex); } }; - $scope.add_panel_default = function(type) { + $scope.addPanelDefault = function(type) { var defaultSpan = 12; var _as = 12 - $scope.dashboard.rowSpan($scope.row); @@ -71,17 +91,18 @@ function (angular, app, _, config) { error: false, span: _as < defaultSpan && _as > 0 ? 
_as : defaultSpan, editable: true, - type: type + type: type, + isNew: true, }; - $scope.add_panel(panel); + $scope.addPanel(panel); $timeout(function() { - $scope.$broadcast('render'); + $scope.dashboardViewState.update({fullscreen: true, edit: true, panelId: panel.id }); }); }; - $scope.set_height = function(height) { + $scope.setHeight = function(height) { $scope.row.height = height; $scope.$broadcast('render'); }; @@ -98,7 +119,7 @@ function (angular, app, _, config) { }; $scope.updatePanelSpan = function(panel, span) { - panel.span = Math.min(Math.max(panel.span + span, 1), 12); + panel.span = Math.min(Math.max(Math.floor(panel.span + span), 1), 12); }; $scope.replacePanel = function(newPanel, oldPanel) { @@ -121,7 +142,18 @@ function (angular, app, _, config) { module.directive('rowHeight', function() { return function(scope, element) { scope.$watchGroup(['row.collapse', 'row.height'], function() { - element[0].style.minHeight = scope.row.collapse ? '5px' : scope.row.height; + element.css({ minHeight: scope.row.collapse ? '5px' : scope.row.height }); + }); + + scope.onAppEvent('panel-fullscreen-enter', function(evt, info) { + var hasPanel = _.findWhere(scope.row.panels, {id: info.panelId}); + if (!hasPanel) { + element.hide(); + } + }); + + scope.onAppEvent('panel-fullscreen-exit', function() { + element.show(); }); }; }); @@ -132,6 +164,22 @@ function (angular, app, _, config) { element[0].style.width = ((scope.panel.span / 1.2) * 10) + '%'; } + scope.onAppEvent('panel-fullscreen-enter', function(evt, info) { + if (scope.panel.id !== info.panelId) { + element.hide(); + } else { + element[0].style.width = '100%'; + } + }); + + scope.onAppEvent('panel-fullscreen-exit', function(evt, info) { + if (scope.panel.id !== info.panelId) { + element.show(); + } else { + updateWidth(); + } + }); + scope.$watch('panel.span', updateWidth); }; }); diff --git a/public/app/features/dashboard/saveDashboardAsCtrl.js b/public/app/features/dashboard/saveDashboardAsCtrl.js index ec8327755f825..92c93b7e4852e 100644 --- a/public/app/features/dashboard/saveDashboardAsCtrl.js +++ b/public/app/features/dashboard/saveDashboardAsCtrl.js @@ -25,6 +25,12 @@ function (angular) { }); } + $scope.keyDown = function (evt) { + if (evt.keyCode === 13) { + $scope.saveClone(); + } + }; + $scope.saveClone = function() { saveDashboard({overwrite: false}).then(null, function(err) { if (err.data && err.data.status === "name-exists") { diff --git a/public/app/features/dashboard/shareModalCtrl.js b/public/app/features/dashboard/shareModalCtrl.js index 55ec0c8a41067..b723660abe77c 100644 --- a/public/app/features/dashboard/shareModalCtrl.js +++ b/public/app/features/dashboard/shareModalCtrl.js @@ -2,7 +2,7 @@ define([ 'angular', 'lodash', 'require', - 'config', + 'app/core/config', ], function (angular, _, require, config) { 'use strict'; @@ -44,8 +44,8 @@ function (angular, _, require, config) { var params = angular.copy($location.search()); var range = timeSrv.timeRange(); - params.from = range.from.getTime(); - params.to = range.to.getTime(); + params.from = range.from.valueOf(); + params.to = range.to.valueOf(); if ($scope.options.includeTemplateVars) { templateSrv.fillVariableValuesForUrl(params); @@ -75,7 +75,7 @@ function (angular, _, require, config) { $scope.iframeHtml = ''; - $scope.imageUrl = soloUrl.replace('/dashboard', '/render/dashboard'); + $scope.imageUrl = soloUrl.replace('/dashboard-solo/', '/render/dashboard-solo/'); $scope.imageUrl += '&width=1000'; $scope.imageUrl += '&height=500'; }; @@ -84,9 +84,9 @@ 
function (angular, _, require, config) { module.directive('clipboardButton',function() { return function(scope, elem) { - require(['ZeroClipboard'], function(ZeroClipboard) { + require(['vendor/zero_clipboard'], function(ZeroClipboard) { ZeroClipboard.config({ - swfPath: config.appSubUrl + '/public/vendor/ZeroClipboard.swf' + swfPath: config.appSubUrl + '/public/vendor/zero_clipboard.swf' }); new ZeroClipboard(elem[0]); }); diff --git a/public/app/features/dashboard/shareSnapshotCtrl.js b/public/app/features/dashboard/shareSnapshotCtrl.js index 19adbd331f537..903353ce9b4bf 100644 --- a/public/app/features/dashboard/shareSnapshotCtrl.js +++ b/public/app/features/dashboard/shareSnapshotCtrl.js @@ -19,7 +19,7 @@ function (angular, _) { $scope.expireOptions = [ {text: '1 Hour', value: 60*60}, {text: '1 Day', value: 60*60*24}, - {text: '7 Days', value: 60*60*7}, + {text: '7 Days', value: 60*60*24*7}, {text: 'Never', value: 0}, ]; diff --git a/public/app/features/dashboard/timeSrv.js b/public/app/features/dashboard/timeSrv.js index 6bb9ccde22354..691bfd07904da 100644 --- a/public/app/features/dashboard/timeSrv.js +++ b/public/app/features/dashboard/timeSrv.js @@ -1,10 +1,11 @@ define([ 'angular', 'lodash', - 'config', - 'kbn', - 'moment' -], function (angular, _, config, kbn, moment) { + 'moment', + 'app/core/config', + 'app/core/utils/kbn', + 'app/core/utils/datemath' +], function (angular, _, moment, config, kbn, dateMath) { 'use strict'; var module = angular.module('grafana.services'); @@ -22,17 +23,17 @@ define([ this._parseTime(); if(this.dashboard.refresh) { - this.set_interval(this.dashboard.refresh); + this.setAutoRefresh(this.dashboard.refresh); } }; this._parseTime = function() { // when absolute time is saved in json it is turned to a string if (_.isString(this.time.from) && this.time.from.indexOf('Z') >= 0) { - this.time.from = new Date(this.time.from); + this.time.from = moment(this.time.from).utc(); } if (_.isString(this.time.to) && this.time.to.indexOf('Z') >= 0) { - this.time.to = new Date(this.time.to); + this.time.to = moment(this.time.to).utc(); } }; @@ -41,14 +42,14 @@ define([ return value; } if (value.length === 8) { - return moment.utc(value, 'YYYYMMDD').toDate(); + return moment.utc(value, 'YYYYMMDD'); } if (value.length === 15) { - return moment.utc(value, 'YYYYMMDDTHHmmss').toDate(); + return moment.utc(value, 'YYYYMMDDTHHmmss'); } var epoch = parseInt(value); if (!_.isNaN(epoch)) { - return new Date(epoch); + return moment(epoch); } return null; @@ -63,7 +64,7 @@ define([ } }; - this.set_interval = function (interval) { + this.setAutoRefresh = function (interval) { this.dashboard.refresh = interval; if (interval) { var _i = kbn.interval_to_ms(interval); @@ -89,16 +90,16 @@ define([ timer.cancel(this.refresh_timer); }; - this.setTime = function(time) { + this.setTime = function(time, enableRefresh) { _.extend(this.time, time); - // disable refresh if we have an absolute time - if (_.isString(time.to) && time.to.indexOf('now') === -1) { + // disable refresh if zoom in or zoom out + if (!enableRefresh && moment.isMoment(time.to)) { this.old_refresh = this.dashboard.refresh || this.old_refresh; - this.set_interval(false); + this.setAutoRefresh(false); } else if (this.old_refresh && this.old_refresh !== this.dashboard.refresh) { - this.set_interval(this.old_refresh); + this.setAutoRefresh(this.old_refresh); this.old_refresh = null; } @@ -112,29 +113,23 @@ define([ range = this.timeRange(); } - if (_.isDate(range.from)) { range.from = range.from.getTime(); } - if 
(_.isDate(range.to)) { range.to = range.to.getTime(); } + if (moment.isMoment(range.from)) { range.from = range.from.valueOf(); } + if (moment.isMoment(range.to)) { range.to = range.to.valueOf(); } return range; }; this.timeRange = function(parse) { - var _t = this.time; + // make copies if they are moment (do not want to return out internal moment, because they are mutable!) + var from = moment.isMoment(this.time.from) ? moment(this.time.from) : this.time.from ; + var to = moment.isMoment(this.time.to) ? moment(this.time.to) : this.time.to ; - if(parse === false) { - return { - from: _t.from, - to: _t.to - }; - } else { - var _from = _t.from; - var _to = _t.to || new Date(); - - return { - from: kbn.parseDate(_from), - to: kbn.parseDate(_to) - }; + if (parse !== false) { + from = dateMath.parse(from, false); + to = dateMath.parse(to, true); } + + return {from: from, to: to}; }; }); diff --git a/public/app/panels/timepicker/custom.html b/public/app/features/dashboard/timepicker/custom.html similarity index 73% rename from public/app/panels/timepicker/custom.html rename to public/app/features/dashboard/timepicker/custom.html index c5532ddfb5d33..519637bf5a347 100644 --- a/public/app/panels/timepicker/custom.html +++ b/public/app/features/dashboard/timepicker/custom.html @@ -9,40 +9,6 @@
    - -
    @@ -59,7 +25,7 @@
    - +
    @@ -69,12 +35,12 @@ .
    -   Right Now +   Right Now

    - + Invalid date or range
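With timeSrv now returning moment objects (see the timeSrv.js and shareModalCtrl.js hunks above), callers convert to epoch milliseconds with valueOf() instead of Date.getTime(). A small sketch of that conversion, assuming a range whose from/to are either moments or raw strings such as 'now-6h':

```ts
// Sketch only: epoch-millisecond URL params from a moment-based range,
// mirroring the valueOf() change in shareModalCtrl/timeSrv above.
import moment = require('moment');

function rangeToUrlParams(range: { from: any; to: any }) {
  return {
    from: moment.isMoment(range.from) ? range.from.valueOf() : range.from, // ms since epoch
    to:   moment.isMoment(range.to)   ? range.to.valueOf()   : range.to,   // relative strings pass through
  };
}
```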
    diff --git a/public/app/features/dashboard/timepicker/dropdown.html b/public/app/features/dashboard/timepicker/dropdown.html new file mode 100644 index 0000000000000..6205e8102b1cd --- /dev/null +++ b/public/app/features/dashboard/timepicker/dropdown.html @@ -0,0 +1,48 @@ +
    +
    +

    Time range

    + +
    + + +
    + +
    + +
    + + +
    + + +
    + +
    + +
    + + + + + +
    + +
    +

    Quick ranges

    +
      +
    • + +
    • +
    +
    + +
    +
    + diff --git a/public/app/features/dashboard/timepicker/input_date.js b/public/app/features/dashboard/timepicker/input_date.js new file mode 100644 index 0000000000000..caec5ba55fe48 --- /dev/null +++ b/public/app/features/dashboard/timepicker/input_date.js @@ -0,0 +1,44 @@ +define([ + "angular", + "lodash", + "moment", +],function (angular, _, moment) { + 'use strict'; + + angular. + module("grafana.directives"). + directive('inputDatetime', function () { + return { + restrict: 'A', + require: 'ngModel', + link: function ($scope, $elem, attrs, ngModel) { + var format = 'YYYY-MM-DD HH:mm:ss'; + + var fromUser = function (text) { + if (text.indexOf('now') !== -1) { + return text; + } + var parsed; + if ($scope.ctrl.isUtc) { + parsed = moment.utc(text, format); + } else { + parsed = moment(text, format); + } + + return parsed.isValid() ? parsed : undefined; + }; + + var toUser = function (currentValue) { + if (moment.isMoment(currentValue)) { + return currentValue.format(format); + } else { + return currentValue; + } + }; + + ngModel.$parsers.push(fromUser); + ngModel.$formatters.push(toUser); + } + }; + }); +}); diff --git a/public/app/features/dashboard/timepicker/settings.html b/public/app/features/dashboard/timepicker/settings.html new file mode 100644 index 0000000000000..fed0094086c99 --- /dev/null +++ b/public/app/features/dashboard/timepicker/settings.html @@ -0,0 +1,48 @@ +
    +
    +
    + + + + + + + + + + + +
    +
      +
    • + Auto-refresh +
    • +
    • + +
    • +
    +
    +
    + +
    +
      +
    • + Now delay +
    • +
    • + now- +
    • +
    • + +
    • + +
    +
    +
    +
    +
    +
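The inputDatetime directive added above (input_date.js) round-trips between display strings and moments through ngModel parsers and formatters. A minimal standalone sketch of that conversion, assuming local (non-UTC) time and the same 'YYYY-MM-DD HH:mm:ss' format:

```ts
// Sketch only: the parse/format pair used by the inputDatetime directive above.
import moment = require('moment');

const format = 'YYYY-MM-DD HH:mm:ss';

// model -> view: moments are formatted, relative expressions like 'now-5m' pass through
function toUser(value: any): string {
  return moment.isMoment(value) ? value.format(format) : value;
}

// view -> model: keep 'now' expressions as strings, otherwise return a valid moment or undefined
function fromUser(text: string): any {
  if (text.indexOf('now') !== -1) { return text; }
  const parsed = moment(text, format);
  return parsed.isValid() ? parsed : undefined;
}
```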
    diff --git a/public/app/features/dashboard/timepicker/timepicker.html b/public/app/features/dashboard/timepicker/timepicker.html new file mode 100644 index 0000000000000..ce976e2cc3d6a --- /dev/null +++ b/public/app/features/dashboard/timepicker/timepicker.html @@ -0,0 +1,32 @@ + diff --git a/public/app/features/dashboard/timepicker/timepicker.ts b/public/app/features/dashboard/timepicker/timepicker.ts new file mode 100644 index 0000000000000..0f99210f3fa1b --- /dev/null +++ b/public/app/features/dashboard/timepicker/timepicker.ts @@ -0,0 +1,179 @@ +/// +/// + +import angular = require('angular'); +import _ = require('lodash'); +import moment = require('moment'); +import kbn = require('app/core/utils/kbn'); +import dateMath = require('app/core/utils/datemath'); +import rangeUtil = require('app/core/utils/rangeutil'); + +declare var inputDate: any; + +export class TimePickerCtrl { + + static tooltipFormat = 'MMM D, YYYY HH:mm:ss'; + static defaults = { + time_options : ['5m','15m','1h','6h','12h','24h','2d','7d','30d'], + refresh_intervals : ['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'], + }; + + dashboard: any; + panel: any; + absolute: any; + timeRaw: any; + tooltip: string; + rangeString: string; + timeOptions: any; + refresh: any; + isOpen: boolean; + isUtc: boolean; + + constructor(private $scope, private $rootScope, private timeSrv) { + $scope.ctrl = this; + + $rootScope.onAppEvent('zoom-out', () => this.zoom(2), $scope); + $rootScope.onAppEvent('refresh', () => this.init(), $scope); + $rootScope.onAppEvent('dash-editor-hidden', () => this.isOpen = false, $scope); + + this.init(); + } + + init() { + this.panel = this.dashboard.timepicker; + + _.defaults(this.panel, TimePickerCtrl.defaults); + + var time = angular.copy(this.timeSrv.timeRange()); + var timeRaw = angular.copy(this.timeSrv.timeRange(false)); + + if (this.dashboard.timezone === 'browser') { + time.from.local(); + time.to.local(); + if (moment.isMoment(timeRaw.from)) { + timeRaw.from.local(); + } + if (moment.isMoment(timeRaw.to)) { + timeRaw.to.local(); + } + } else { + this.isUtc = true; + } + + this.rangeString = rangeUtil.describeTimeRange(timeRaw); + this.absolute = {fromJs: time.from.toDate(), toJs: time.to.toDate()}; + this.tooltip = this.dashboard.formatDate(time.from) + '
    to
    '; + this.tooltip += this.dashboard.formatDate(time.to); + + // do not update time raw when dropdown is open + // as auto refresh will reset the from/to input fields + if (!this.isOpen) { + this.timeRaw = timeRaw; + } + } + + zoom(factor) { + var range = this.timeSrv.timeRange(); + + var timespan = (range.to.valueOf() - range.from.valueOf()); + var center = range.to.valueOf() - timespan/2; + + var to = (center + (timespan*factor)/2); + var from = (center - (timespan*factor)/2); + + if (to > Date.now() && range.to <= Date.now()) { + var offset = to - Date.now(); + from = from - offset; + to = Date.now(); + } + + this.timeSrv.setTime({from: moment.utc(from), to: moment.utc(to) }); + } + + openDropdown() { + this.init(); + this.isOpen = true; + this.timeOptions = rangeUtil.getRelativeTimesList(this.panel, this.rangeString); + this.refresh = { + value: this.dashboard.refresh, + options: _.map(this.panel.refresh_intervals, (interval: any) => { + return {text: interval, value: interval}; + }) + }; + + this.refresh.options.unshift({text: 'off'}); + + this.$rootScope.appEvent('show-dash-editor', { + src: 'app/features/dashboard/timepicker/dropdown.html', + scope: this.$scope, + cssClass: 'gf-timepicker-dropdown', + }); + } + + applyCustom() { + if (this.refresh.value !== this.dashboard.refresh) { + this.timeSrv.setAutoRefresh(this.refresh.value); + } + + this.timeSrv.setTime(this.timeRaw, true); + this.$rootScope.appEvent('hide-dash-editor'); + } + + absoluteFromChanged() { + this.timeRaw.from = this.getAbsoluteMomentForTimezone(this.absolute.fromJs); + } + + absoluteToChanged() { + this.timeRaw.to = this.getAbsoluteMomentForTimezone(this.absolute.toJs); + } + + getAbsoluteMomentForTimezone(jsDate) { + return this.dashboard.timezone === 'browser' ? moment(jsDate) : moment(jsDate).utc(); + } + + setRelativeFilter(timespan) { + this.panel.now = true; + + var range = {from: timespan.from, to: timespan.to}; + + if (this.panel.nowDelay && range.to === 'now') { + range.to = 'now-' + this.panel.nowDelay; + } + + this.timeSrv.setTime(range); + this.$rootScope.appEvent('hide-dash-editor'); + } + +} + +export function settingsDirective() { + 'use strict'; + return { + restrict: 'E', + templateUrl: 'app/features/dashboard/timepicker/settings.html', + controller: TimePickerCtrl, + bindToController: true, + controllerAs: 'ctrl', + scope: { + dashboard: "=" + } + }; +} + +export function timePickerDirective() { + 'use strict'; + return { + restrict: 'E', + templateUrl: 'app/features/dashboard/timepicker/timepicker.html', + controller: TimePickerCtrl, + bindToController: true, + controllerAs: 'ctrl', + scope: { + dashboard: "=" + } + }; +} + + +angular.module('grafana.directives').directive('gfTimePickerSettings', settingsDirective); +angular.module('grafana.directives').directive('gfTimePicker', timePickerDirective); diff --git a/public/app/features/dashboard/unsavedChangesSrv.js b/public/app/features/dashboard/unsavedChangesSrv.js index eeff9ae05c76b..bbb38d745c72f 100644 --- a/public/app/features/dashboard/unsavedChangesSrv.js +++ b/public/app/features/dashboard/unsavedChangesSrv.js @@ -7,7 +7,7 @@ function(angular, _) { var module = angular.module('grafana.services'); - module.service('unsavedChangesSrv', function($modal, $q, $location, $timeout, contextSrv, $window) { + module.service('unsavedChangesSrv', function($rootScope, $q, $location, $timeout, contextSrv, $window) { function Tracker(dashboard, scope) { var self = this; @@ -122,11 +122,7 @@ function(angular, _) { var currentJson = 
angular.toJson(current); var originalJson = angular.toJson(original); - if (currentJson !== originalJson) { - return true; - } - - return false; + return currentJson !== originalJson; }; p.open_modal = function() { @@ -142,17 +138,10 @@ function(angular, _) { tracker.scope.$emit('save-dashboard'); }; - var confirmModal = $modal({ - template: './app/partials/unsaved-changes.html', - modalClass: 'confirm-modal', - persist: false, - show: false, + $rootScope.appEvent('show-modal', { + src: './app/partials/unsaved-changes.html', + modalClass: 'modal-no-header confirm-modal', scope: modalScope, - keyboard: false - }); - - $q.when(confirmModal).then(function(modalEl) { - modalEl.modal('show'); }); }; diff --git a/public/app/features/dashboard/viewStateSrv.js b/public/app/features/dashboard/viewStateSrv.js index e87a942a6defe..58403766a3166 100644 --- a/public/app/features/dashboard/viewStateSrv.js +++ b/public/app/features/dashboard/viewStateSrv.js @@ -89,6 +89,11 @@ function (angular, _, $) { this.leaveFullscreen(false); } var panelScope = this.getPanelScope(this.state.panelId); + // panel could be about to be created/added and scope does + // not exist yet + if (!panelScope) { + return; + } this.enterFullscreen(panelScope); return; } @@ -111,6 +116,8 @@ function (angular, _, $) { self.fullscreenPanel.fullscreen = false; delete self.fullscreenPanel.height; + this.$scope.appEvent('panel-fullscreen-exit', {panelId: this.fullscreenPanel.panel.id}); + if (!render) { return false;} $timeout(function() { @@ -125,8 +132,6 @@ function (angular, _, $) { }; DashboardViewState.prototype.enterFullscreen = function(panelScope) { - this.$scope.appEvent('hide-dash-editor'); - var docHeight = $(window).height(); var editHeight = Math.floor(docHeight * 0.3); var fullscreenHeight = Math.floor(docHeight * 0.7); @@ -140,6 +145,7 @@ function (angular, _, $) { $(window).scrollTop(0); panelScope.fullscreen = true; + this.$scope.appEvent('panel-fullscreen-enter', {panelId: panelScope.panel.id}); $timeout(function() { panelScope.$broadcast('render'); diff --git a/public/app/features/dashlinks/editor.html b/public/app/features/dashlinks/editor.html index 886550d9b9c64..9f44d72206c03 100644 --- a/public/app/features/dashlinks/editor.html +++ b/public/app/features/dashlinks/editor.html @@ -58,7 +58,7 @@
    Links and Dash Navigation
  • Tooltip
  • - +
  • Icon
  • @@ -67,7 +67,7 @@
    Links and Dash Navigation
  • -
    +
    • diff --git a/public/app/features/dashlinks/module.js b/public/app/features/dashlinks/module.js index 23ce8c0ffaec8..b7029e6cf2df9 100644 --- a/public/app/features/dashlinks/module.js +++ b/public/app/features/dashlinks/module.js @@ -52,7 +52,7 @@ function (angular, _) { if (link.asDropdown) { template += '
    +
    +
      +
    • + +
    • +
    • + +
    • +
    • + All format +
    • +
    • + +
    • +
    +
    +
    @@ -169,7 +208,7 @@
    Value Options
    All format
  • - +
  • @@ -200,7 +239,7 @@
    Multi-value selection Enables multiple values to be selected at the sam Multi format
  • - +
  • @@ -229,7 +268,7 @@
    Display options
    Value groups/tags (Experimental feature)
    -
    +
    • Tags query @@ -246,7 +285,7 @@
      Value groups/tags (Experimental feature)
      Tag values query
    • - +
    @@ -278,8 +317,8 @@
    Preview of values (shows max 20)
    - - + +
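The "All format" and "Multi format" selectors in the variable editor above map to the value-formatting switch in the templateSrv.js change that follows. A sketch of the four multi-value renderings, assuming glob-style braces remain the default:

```ts
// Sketch only: how a multi-value selection is rendered per format,
// following the switch added to templateSrv below.
function formatMultiValue(values: string[], multiFormat: string): string {
  switch (multiFormat) {
    case 'regex values':
      return '(' + values.join('|') + ')';
    case 'lucene':
      // lucene needs escaped quotes around each value
      return '(' + values.map(v => '\\"' + v + '\\"').join(' OR ') + ')';
    case 'pipe':
      return values.join('|');
    default:
      return '{' + values.join(',') + '}'; // glob-style default
  }
}

// e.g. formatMultiValue(['web01', 'web02'], 'pipe') === 'web01|web02'
```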
    diff --git a/public/app/features/templating/templateSrv.js b/public/app/features/templating/templateSrv.js index 40b661b0cbc0a..9a59510c6a465 100644 --- a/public/app/features/templating/templateSrv.js +++ b/public/app/features/templating/templateSrv.js @@ -39,10 +39,23 @@ function (angular, _) { if (_.isString(value)) { return value; } else { - if (variable.multiFormat === 'regex values') { - return '(' + value.join('|') + ')'; + switch(variable.multiFormat) { + case "regex values": { + return '(' + value.join('|') + ')'; + } + case "lucene": { + var quotedValues = _.map(value, function(val) { + return '\\\"' + val + '\\\"'; + }); + return '(' + quotedValues.join(' OR ') + ')'; + } + case "pipe": { + return value.join('|'); + } + default: { + return '{' + value.join(',') + '}'; + } } - return '{' + value.join(',') + '}'; } }; @@ -115,17 +128,20 @@ function (angular, _) { }); }; - this.fillVariableValuesForUrl = function(params) { - var toUrlVal = function(current) { + this.fillVariableValuesForUrl = function(params, scopedVars) { + _.each(this.variables, function(variable) { + var current = variable.current; + var value = current.value; + if (current.text === 'All') { - return 'All'; - } else { - return current.value; + value = 'All'; } - }; - _.each(this.variables, function(variable) { - params['var-' + variable.name] = toUrlVal(variable.current); + if (scopedVars && scopedVars[variable.name] !== void 0) { + value = scopedVars[variable.name].value; + } + + params['var-' + variable.name] = value; }); }; diff --git a/public/app/features/templating/templateValuesSrv.js b/public/app/features/templating/templateValuesSrv.js index 57cce7a8df553..6029bdf899cd9 100644 --- a/public/app/features/templating/templateValuesSrv.js +++ b/public/app/features/templating/templateValuesSrv.js @@ -1,7 +1,7 @@ define([ 'angular', 'lodash', - 'kbn', + 'app/core/utils/kbn', ], function (angular, _, kbn) { 'use strict'; @@ -18,7 +18,7 @@ function (angular, _, kbn) { if (variable) { self.updateAutoInterval(variable); } - }); + }, $rootScope); this.init = function(dashboard) { this.variables = dashboard.templating.list; @@ -45,17 +45,6 @@ function (angular, _, kbn) { }; this.setVariableFromUrl = function(variable, urlValue) { - if (variable.refresh) { - var self = this; - //refresh the list of options before setting the value - return this.updateOptions(variable).then(function() { - var option = _.findWhere(variable.options, { text: urlValue }); - option = option || { text: urlValue, value: urlValue }; - - self.updateAutoInterval(variable); - return self.setVariableValue(variable, option); - }); - } var option = _.findWhere(variable.options, { text: urlValue }); option = option || { text: urlValue, value: urlValue }; @@ -80,11 +69,12 @@ function (angular, _, kbn) { if (_.isArray(variable.current.value)) { variable.current.text = variable.current.value.join(' + '); - this.selectOptionsForCurrentValue(variable); } + self.selectOptionsForCurrentValue(variable); + templateSrv.updateTemplateData(); - return this.updateOptionsInChildVariables(variable); + return self.updateOptionsInChildVariables(variable); }; this.variableUpdated = function(variable) { @@ -114,6 +104,11 @@ function (angular, _, kbn) { if (variable.type === 'interval') { self.updateAutoInterval(variable); } + + if (variable.type === 'custom' && variable.includeAll) { + self.addAllOption(variable); + } + }; this.updateOptions = function(variable) { @@ -130,13 +125,20 @@ function (angular, _, kbn) { }; this.selectOptionsForCurrentValue = 
function(variable) { - for (var i = 0; i < variable.current.value.length; i++) { - var value = variable.current.value[i]; - for (var y = 0; y < variable.options.length; y++) { - var option = variable.options[y]; - if (option.value === value) { - option.selected = true; + var i, y, value, option; + + for (i = 0; i < variable.options.length; i++) { + option = variable.options[i]; + option.selected = false; + if (_.isArray(variable.current.value)) { + for (y = 0; y < variable.current.value.length; y++) { + value = variable.current.value[y]; + if (option.value === value) { + option.selected = true; + } } + } else if (option.value === variable.current.value) { + option.selected = true; } } }; @@ -148,7 +150,7 @@ function (angular, _, kbn) { } if (_.isArray(variable.current.value)) { - this.selectOptionsForCurrentValue(variable); + self.selectOptionsForCurrentValue(variable); } else { var currentOption = _.findWhere(variable.options, { text: variable.current.text }); if (currentOption) { @@ -245,21 +247,36 @@ function (angular, _, kbn) { this.addAllOption = function(variable) { var allValue = ''; switch(variable.allFormat) { - case 'wildcard': - allValue = '*'; - break; - case 'regex wildcard': - allValue = '.*'; - break; - case 'regex values': - allValue = '(' + _.map(variable.options, function(option) { - return self.regexEscape(option.text); - }).join('|') + ')'; - break; - default: - allValue = '{'; - allValue += _.pluck(variable.options, 'text').join(','); - allValue += '}'; + case 'wildcard': { + allValue = '*'; + break; + } + case 'regex wildcard': { + allValue = '.*'; + break; + } + case 'lucene': { + var quotedValues = _.map(variable.options, function(val) { + return '\\\"' + val.text + '\\\"'; + }); + allValue = '(' + quotedValues.join(' OR ') + ')'; + break; + } + case 'regex values': { + allValue = '(' + _.map(variable.options, function(option) { + return self.regexEscape(option.text); + }).join('|') + ')'; + break; + } + case 'pipe': { + allValue = _.pluck(variable.options, 'text').join('|'); + break; + } + default: { + allValue = '{'; + allValue += _.pluck(variable.options, 'text').join(','); + allValue += '}'; + } } variable.options.unshift({text: 'All', value: allValue}); diff --git a/public/app/filters/all.js b/public/app/filters/all.js deleted file mode 100755 index c904c7c9449bd..0000000000000 --- a/public/app/filters/all.js +++ /dev/null @@ -1,72 +0,0 @@ -define(['angular', 'jquery', 'lodash', 'moment'], function (angular, $, _, moment) { - 'use strict'; - - var module = angular.module('grafana.filters'); - - module.filter('stringSort', function() { - return function(input) { - return input.sort(); - }; - }); - - module.filter('slice', function() { - return function(arr, start, end) { - if(!_.isUndefined(arr)) { - return arr.slice(start, end); - } - }; - }); - - module.filter('stringify', function() { - return function(arr) { - if(_.isObject(arr) && !_.isArray(arr)) { - return angular.toJson(arr); - } else { - return _.isNull(arr) ? null : arr.toString(); - } - }; - }); - - module.filter('moment', function() { - return function(date,mode) { - switch(mode) { - case 'ago': - return moment(date).fromNow(); - } - return moment(date).fromNow(); - }; - }); - - module.filter('noXml', function() { - var noXml = function(text) { - return _.isString(text) - ? text - .replace(/&/g, '&') - .replace(//g, '>') - .replace(/'/g, ''') - .replace(/"/g, '"') - : text; - }; - return function(text) { - return _.isArray(text) - ? 
_.map(text, noXml) - : noXml(text); - }; - }); - - module.filter('interpolateTemplateVars', function(templateSrv) { - function interpolateTemplateVars(text, scope) { - if (scope.panel) { - return templateSrv.replaceWithText(text, scope.panel.scopedVars); - } else { - return templateSrv.replaceWithText(text, scope.row.scopedVars); - } - } - - interpolateTemplateVars.$stateful = true; - - return interpolateTemplateVars; - }); - -}); diff --git a/public/app/headers/angularjs/angularjs.d.ts b/public/app/headers/angularjs/angularjs.d.ts new file mode 100644 index 0000000000000..2e746389d9097 --- /dev/null +++ b/public/app/headers/angularjs/angularjs.d.ts @@ -0,0 +1,1746 @@ +// Type definitions for Angular JS 1.4+ +// Project: http://angularjs.org +// Definitions by: Diego Vilar +// Definitions: https://github.com/borisyankov/DefinitelyTyped + + +/// + + +// Support for painless dependency injection +interface Function { + $inject?: string[]; +} + +// Collapse angular into ng +import ng = angular; +// Support AMD require +declare module 'angular' { + var angular: angular.IAngularStatic; + export = angular; +} + +/////////////////////////////////////////////////////////////////////////////// +// ng module (angular.js) +/////////////////////////////////////////////////////////////////////////////// +declare module angular { + + // not directly implemented, but ensures that constructed class implements $get + interface IServiceProviderClass { + new (...args: any[]): IServiceProvider; + } + + interface IServiceProviderFactory { + (...args: any[]): IServiceProvider; + } + + // All service providers extend this interface + interface IServiceProvider { + $get: any; + } + + interface IAngularBootstrapConfig { + strictDi?: boolean; + } + + /////////////////////////////////////////////////////////////////////////// + // AngularStatic + // see http://docs.angularjs.org/api + /////////////////////////////////////////////////////////////////////////// + interface IAngularStatic { + bind(context: any, fn: Function, ...args: any[]): Function; + + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: string, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. 
+ */ + bootstrap(element: string, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: string, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: JQuery, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: JQuery, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: JQuery, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. 
The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Element, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Element, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Element, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Document, modules?: string, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. + * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Document, modules?: Function, config?: IAngularBootstrapConfig): auto.IInjectorService; + /** + * Use this function to manually start up angular application. + * + * @param element DOM element which is the root of angular application. + * @param modules An array of modules to load into the application. 
+ * Each item in the array should be the name of a predefined module or a (DI annotated) + * function that will be invoked by the injector as a run block. + * @param config an object for defining configuration options for the application. The following keys are supported: + * - `strictDi`: disable automatic function annotation for the application. This is meant to assist in finding bugs which break minified code. + */ + bootstrap(element: Document, modules?: string[], config?: IAngularBootstrapConfig): auto.IInjectorService; + + /** + * Creates a deep copy of source, which should be an object or an array. + * + * - If no destination is supplied, a copy of the object or array is created. + * - If a destination is provided, all of its elements (for array) or properties (for objects) are deleted and then all elements/properties from the source are copied to it. + * - If source is not an object or array (inc. null and undefined), source is returned. + * - If source is identical to 'destination' an exception will be thrown. + * + * @param source The source that will be used to make a copy. Can be any type, including primitives, null, and undefined. + * @param destination Destination into which the source is copied. If provided, must be of the same type as source. + */ + copy(source: T, destination?: T): T; + + /** + * Wraps a raw DOM element or HTML string as a jQuery element. + * + * If jQuery is available, angular.element is an alias for the jQuery function. If jQuery is not available, angular.element delegates to Angular's built-in subset of jQuery, called "jQuery lite" or "jqLite." + */ + element: IAugmentedJQueryStatic; + equals(value1: any, value2: any): boolean; + extend(destination: any, ...sources: any[]): any; + + /** + * Invokes the iterator function once for each item in obj collection, which can be either an object or an array. The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional. + * + * It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method. + * + * @param obj Object to iterate over. + * @param iterator Iterator function. + * @param context Object to become context (this) for the iterator function. + */ + forEach(obj: T[], iterator: (value: T, key: number) => any, context?: any): any; + /** + * Invokes the iterator function once for each item in obj collection, which can be either an object or an array. The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional. + * + * It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method. + * + * @param obj Object to iterate over. + * @param iterator Iterator function. + * @param context Object to become context (this) for the iterator function. + */ + forEach(obj: { [index: string]: T; }, iterator: (value: T, key: string) => any, context?: any): any; + /** + * Invokes the iterator function once for each item in obj collection, which can be either an object or an array. 
The iterator function is invoked with iterator(value, key), where value is the value of an object property or an array element and key is the object property key or array element index. Specifying a context for the function is optional. + * + * It is worth noting that .forEach does not iterate over inherited properties because it filters using the hasOwnProperty method. + * + * @param obj Object to iterate over. + * @param iterator Iterator function. + * @param context Object to become context (this) for the iterator function. + */ + forEach(obj: any, iterator: (value: any, key: any) => any, context?: any): any; + + fromJson(json: string): any; + identity(arg?: T): T; + injector(modules?: any[], strictDi?: boolean): auto.IInjectorService; + isArray(value: any): boolean; + isDate(value: any): boolean; + isDefined(value: any): boolean; + isElement(value: any): boolean; + isFunction(value: any): boolean; + isNumber(value: any): boolean; + isObject(value: any): boolean; + isString(value: any): boolean; + isUndefined(value: any): boolean; + lowercase(str: string): string; + + /** + * Deeply extends the destination object dst by copying own enumerable properties from the src object(s) to dst. You can specify multiple src objects. If you want to preserve original objects, you can do so by passing an empty object as the target: var object = angular.merge({}, object1, object2). + * + * Unlike extend(), merge() recursively descends into object properties of source objects, performing a deep copy. + * + * @param dst Destination object. + * @param src Source object(s). + */ + merge(dst: any, ...src: any[]): any; + + /** + * The angular.module is a global place for creating, registering and retrieving Angular modules. All modules (angular core or 3rd party) that should be available to an application must be registered using this mechanism. + * + * When passed two or more arguments, a new module is created. If passed only one argument, an existing module (the name passed as the first argument to module) is retrieved. + * + * @param name The name of the module to create or retrieve. + * @param requires The names of modules this module depends on. If specified then new module is being created. If unspecified then the module is being retrieved for further configuration. + * @param configFn Optional configuration function for the module. + */ + module( + name: string, + requires?: string[], + configFn?: Function): IModule; + + noop(...args: any[]): void; + reloadWithDebugInfo(): void; + toJson(obj: any, pretty?: boolean): string; + uppercase(str: string): string; + version: { + full: string; + major: number; + minor: number; + dot: number; + codeName: string; + }; + } + + /////////////////////////////////////////////////////////////////////////// + // Module + // see http://docs.angularjs.org/api/angular.Module + /////////////////////////////////////////////////////////////////////////// + interface IModule { + animation(name: string, animationFactory: Function): IModule; + animation(name: string, inlineAnnotatedFunction: any[]): IModule; + animation(object: Object): IModule; + /** + * Use this method to register work which needs to be performed on module loading. + * + * @param configFn Execute this function on module load. Useful for service configuration. + */ + config(configFn: Function): IModule; + /** + * Use this method to register work which needs to be performed on module loading. + * + * @param inlineAnnotatedFunction Execute this function on module load. Useful for service configuration. 
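The members declared above (`module`, `forEach`, `bootstrap`) are the usual entry points of the API these typings describe. A minimal sketch, assuming a hypothetical module named `myApp` that is not part of this patch:

    // Create (two-plus args) vs. retrieve (one arg) a module, per the module() doc above.
    var app = angular.module('myApp', []);

    // forEach skips inherited properties and passes (value, key) to the iterator.
    angular.forEach({theme: 'dark', rows: 3}, function(value, key) {
      console.log(key + ' = ' + value);
    });

    // Manual start-up instead of ng-app; strictDi matches IAngularBootstrapConfig above.
    angular.element(document).ready(function() {
      angular.bootstrap(document, ['myApp'], {strictDi: true});
    });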
+ */ + config(inlineAnnotatedFunction: any[]): IModule; + /** + * Register a constant service, such as a string, a number, an array, an object or a function, with the $injector. Unlike value it can be injected into a module configuration function (see config) and it cannot be overridden by an Angular decorator. + * + * @param name The name of the constant. + * @param value The constant value. + */ + constant(name: string, value: any): IModule; + constant(object: Object): IModule; + /** + * The $controller service is used by Angular to create new controllers. + * + * This provider allows controller registration via the register method. + * + * @param name Controller name, or an object map of controllers where the keys are the names and the values are the constructors. + * @param controllerConstructor Controller constructor fn (optionally decorated with DI annotations in the array notation). + */ + controller(name: string, controllerConstructor: Function): IModule; + /** + * The $controller service is used by Angular to create new controllers. + * + * This provider allows controller registration via the register method. + * + * @param name Controller name, or an object map of controllers where the keys are the names and the values are the constructors. + * @param controllerConstructor Controller constructor fn (optionally decorated with DI annotations in the array notation). + */ + controller(name: string, inlineAnnotatedConstructor: any[]): IModule; + controller(object: Object): IModule; + /** + * Register a new directive with the compiler. + * + * @param name Name of the directive in camel-case (i.e. ngBind which will match as ng-bind) + * @param directiveFactory An injectable directive factory function. + */ + directive(name: string, directiveFactory: IDirectiveFactory): IModule; + /** + * Register a new directive with the compiler. + * + * @param name Name of the directive in camel-case (i.e. ngBind which will match as ng-bind) + * @param directiveFactory An injectable directive factory function. + */ + directive(name: string, inlineAnnotatedFunction: any[]): IModule; + directive(object: Object): IModule; + /** + * Register a service factory, which will be called to return the service instance. This is short for registering a service where its provider consists of only a $get property, which is the given service factory function. You should use $provide.factory(getFn) if you do not need to configure your service in a provider. + * + * @param name The name of the instance. + * @param $getFn The $getFn for the instance creation. Internally this is a short hand for $provide.provider(name, {$get: $getFn}). + */ + factory(name: string, $getFn: Function): IModule; + /** + * Register a service factory, which will be called to return the service instance. This is short for registering a service where its provider consists of only a $get property, which is the given service factory function. You should use $provide.factory(getFn) if you do not need to configure your service in a provider. + * + * @param name The name of the instance. + * @param inlineAnnotatedFunction The $getFn for the instance creation. Internally this is a short hand for $provide.provider(name, {$get: $getFn}). 
+ */ + factory(name: string, inlineAnnotatedFunction: any[]): IModule; + factory(object: Object): IModule; + filter(name: string, filterFactoryFunction: Function): IModule; + filter(name: string, inlineAnnotatedFunction: any[]): IModule; + filter(object: Object): IModule; + provider(name: string, serviceProviderFactory: IServiceProviderFactory): IModule; + provider(name: string, serviceProviderConstructor: IServiceProviderClass): IModule; + provider(name: string, inlineAnnotatedConstructor: any[]): IModule; + provider(name: string, providerObject: IServiceProvider): IModule; + provider(object: Object): IModule; + /** + * Run blocks are the closest thing in Angular to the main method. A run block is the code which needs to run to kickstart the application. It is executed after all of the service have been configured and the injector has been created. Run blocks typically contain code which is hard to unit-test, and for this reason should be declared in isolated modules, so that they can be ignored in the unit-tests. + */ + run(initializationFunction: Function): IModule; + /** + * Run blocks are the closest thing in Angular to the main method. A run block is the code which needs to run to kickstart the application. It is executed after all of the service have been configured and the injector has been created. Run blocks typically contain code which is hard to unit-test, and for this reason should be declared in isolated modules, so that they can be ignored in the unit-tests. + */ + run(inlineAnnotatedFunction: any[]): IModule; + service(name: string, serviceConstructor: Function): IModule; + service(name: string, inlineAnnotatedConstructor: any[]): IModule; + service(object: Object): IModule; + /** + * Register a value service with the $injector, such as a string, a number, an array, an object or a function. This is short for registering a service where its provider's $get property is a factory function that takes no arguments and returns the value service. + + Value services are similar to constant services, except that they cannot be injected into a module configuration function (see config) but they can be overridden by an Angular decorator. + * + * @param name The name of the instance. + * @param value The value. + */ + value(name: string, value: any): IModule; + value(object: Object): IModule; + + /** + * Register a service decorator with the $injector. A service decorator intercepts the creation of a service, allowing it to override or modify the behaviour of the service. The object returned by the decorator may be the original service, or a new service object which replaces or wraps and delegates to the original service. + * @param name The name of the service to decorate + * @param decorator This function will be invoked when the service needs to be instantiated and should return the decorated service instance. The function is called using the injector.invoke method and is therefore fully injectable. Local injection arguments: $delegate - The original service instance, which can be monkey patched, configured, decorated or delegated to. 
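A sketch of the registration methods declared on `IModule` above; every name below (`apiBase`, `logEnhancer`, `shorten`) is made up for illustration:

    angular.module('myApp')
      .constant('apiBase', '/api')
      .value('pageSize', 25)
      .factory('logEnhancer', function($log) {
        return { info: function(msg) { $log.info('[myApp] ' + msg); } };
      })
      .filter('shorten', function() {
        return function(input, max) { return (input || '').slice(0, max || 20); };
      })
      .run(function(logEnhancer) {
        // Run blocks execute once all services are configured, as noted above.
        logEnhancer.info('application started');
      });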
+ */ + decorator(name:string, decoratorConstructor: Function): IModule; + decorator(name:string, inlineAnnotatedConstructor: any[]): IModule; + + // Properties + name: string; + requires: string[]; + } + + /////////////////////////////////////////////////////////////////////////// + // Attributes + // see http://docs.angularjs.org/api/ng.$compile.directive.Attributes + /////////////////////////////////////////////////////////////////////////// + interface IAttributes { + /** + * this is necessary to be able to access the scoped attributes. it's not very elegant + * because you have to use attrs['foo'] instead of attrs.foo but I don't know of a better way + * this should really be limited to return string but it creates this problem: http://stackoverflow.com/q/17201854/165656 + */ + [name: string]: any; + + /** + * Converts an attribute name (e.g. dash/colon/underscore-delimited string, optionally prefixed with x- or data-) to its normalized, camelCase form. + * + * Also there is special case for Moz prefix starting with upper case letter. + * + * For further information check out the guide on @see https://docs.angularjs.org/guide/directive#matching-directives + */ + $normalize(name: string): void; + + /** + * Adds the CSS class value specified by the classVal parameter to the + * element. If animations are enabled then an animation will be triggered + * for the class addition. + */ + $addClass(classVal: string): void; + + /** + * Removes the CSS class value specified by the classVal parameter from the + * element. If animations are enabled then an animation will be triggered for + * the class removal. + */ + $removeClass(classVal: string): void; + + /** + * Set DOM element attribute value. + */ + $set(key: string, value: any): void; + + /** + * Observes an interpolated attribute. + * The observer function will be invoked once during the next $digest + * following compilation. The observer is then invoked whenever the + * interpolated value changes. + */ + $observe(name: string, fn: (value?: T) => any): Function; + + /** + * A map of DOM element attribute names to the normalized name. This is needed + * to do reverse lookup from normalized name back to actual name. 
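The `IAttributes` helpers being declared here are normally used inside a directive link function; a sketch with a hypothetical `statusBadge` directive:

    angular.module('myApp').directive('statusBadge', function() {
      return {
        restrict: 'A',
        link: function(scope, element, attrs) {
          // $observe fires once after compilation and again whenever the
          // interpolated attribute value changes.
          attrs.$observe('statusBadge', function(value) {
            attrs.$set('title', 'status: ' + value);
            attrs.$addClass('status-' + value);
          });
        }
      };
    });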
+ */ + $attr: Object; + } + + /** + * form.FormController - type in module ng + * see https://docs.angularjs.org/api/ng/type/form.FormController + */ + interface IFormController { + + /** + * Indexer which should return ng.INgModelController for most properties but cannot because of "All named properties must be assignable to string indexer type" constraint - see https://github.com/Microsoft/TypeScript/issues/272 + */ + [name: string]: any; + + $pristine: boolean; + $dirty: boolean; + $valid: boolean; + $invalid: boolean; + $submitted: boolean; + $error: any; + $addControl(control: INgModelController): void; + $removeControl(control: INgModelController): void; + $setValidity(validationErrorKey: string, isValid: boolean, control: INgModelController): void; + $setDirty(): void; + $setPristine(): void; + $commitViewValue(): void; + $rollbackViewValue(): void; + $setSubmitted(): void; + $setUntouched(): void; + } + + /////////////////////////////////////////////////////////////////////////// + // NgModelController + // see http://docs.angularjs.org/api/ng.directive:ngModel.NgModelController + /////////////////////////////////////////////////////////////////////////// + interface INgModelController { + $render(): void; + $setValidity(validationErrorKey: string, isValid: boolean): void; + // Documentation states viewValue and modelValue to be a string but other + // types do work and it's common to use them. + $setViewValue(value: any, trigger?: string): void; + $setPristine(): void; + $setDirty(): void; + $validate(): void; + $setTouched(): void; + $setUntouched(): void; + $rollbackViewValue(): void; + $commitViewValue(): void; + $isEmpty(value: any): boolean; + + $viewValue: any; + + $modelValue: any; + + $parsers: IModelParser[]; + $formatters: IModelFormatter[]; + $viewChangeListeners: IModelViewChangeListener[]; + $error: any; + $name: string; + + $touched: boolean; + $untouched: boolean; + + $validators: IModelValidators; + $asyncValidators: IAsyncModelValidators; + + $pending: any; + $pristine: boolean; + $dirty: boolean; + $valid: boolean; + $invalid: boolean; + } + + interface IModelValidators { + /** + * viewValue is any because it can be an object that is called in the view like $viewValue.name:$viewValue.subName + */ + [index: string]: (modelValue: any, viewValue: any) => boolean; + } + + interface IAsyncModelValidators { + [index: string]: (modelValue: any, viewValue: any) => IPromise; + } + + interface IModelParser { + (value: any): any; + } + + interface IModelFormatter { + (value: any): any; + } + + interface IModelViewChangeListener { + (): void; + } + + /** + * $rootScope - $rootScopeProvider - service in module ng + * see https://docs.angularjs.org/api/ng/type/$rootScope.Scope and https://docs.angularjs.org/api/ng/service/$rootScope + */ + interface IRootScopeService { + [index: string]: any; + + $apply(): any; + $apply(exp: string): any; + $apply(exp: (scope: IScope) => any): any; + + $applyAsync(): any; + $applyAsync(exp: string): any; + $applyAsync(exp: (scope: IScope) => any): any; + + /** + * Dispatches an event name downwards to all child scopes (and their children) notifying the registered $rootScope.Scope listeners. + * + * The event life cycle starts at the scope on which $broadcast was called. All listeners listening for name event on this scope get notified. Afterwards, the event propagates to all direct and indirect scopes of the current scope and calls all registered listeners along the way. The event cannot be canceled. 
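A sketch of plugging a synchronous validator into `$validators` on `INgModelController`; the `evenNumber` directive is hypothetical:

    angular.module('myApp').directive('evenNumber', function() {
      return {
        require: 'ngModel',
        link: function(scope, element, attrs, ngModel) {
          ngModel.$validators.evenNumber = function(modelValue, viewValue) {
            var value = modelValue || viewValue;
            // Empty values are left to required/ng-required to flag.
            return ngModel.$isEmpty(value) || parseInt(value, 10) % 2 === 0;
          };
        }
      };
    });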
+ * + * Any exception emitted from the listeners will be passed onto the $exceptionHandler service. + * + * @param name Event name to broadcast. + * @param args Optional one or more arguments which will be passed onto the event listeners. + */ + $broadcast(name: string, ...args: any[]): IAngularEvent; + $destroy(): void; + $digest(): void; + /** + * Dispatches an event name upwards through the scope hierarchy notifying the registered $rootScope.Scope listeners. + * + * The event life cycle starts at the scope on which $emit was called. All listeners listening for name event on this scope get notified. Afterwards, the event traverses upwards toward the root scope and calls all registered listeners along the way. The event will stop propagating if one of the listeners cancels it. + * + * Any exception emitted from the listeners will be passed onto the $exceptionHandler service. + * + * @param name Event name to emit. + * @param args Optional one or more arguments which will be passed onto the event listeners. + */ + $emit(name: string, ...args: any[]): IAngularEvent; + + $eval(): any; + $eval(expression: string, locals?: Object): any; + $eval(expression: (scope: IScope) => any, locals?: Object): any; + + $evalAsync(): void; + $evalAsync(expression: string): void; + $evalAsync(expression: (scope: IScope) => any): void; + + // Defaults to false by the implementation checking strategy + $new(isolate?: boolean, parent?: IScope): IScope; + + /** + * Listens on events of a given type. See $emit for discussion of event life cycle. + * + * The event listener function format is: function(event, args...). + * + * @param name Event name to listen on. + * @param listener Function to call when the event is emitted. + */ + $on(name: string, listener: (event: IAngularEvent, ...args: any[]) => any): Function; + + $watch(watchExpression: string, listener?: string, objectEquality?: boolean): Function; + $watch(watchExpression: string, listener?: (newValue: T, oldValue: T, scope: IScope) => any, objectEquality?: boolean): Function; + $watch(watchExpression: (scope: IScope) => any, listener?: string, objectEquality?: boolean): Function; + $watch(watchExpression: (scope: IScope) => T, listener?: (newValue: T, oldValue: T, scope: IScope) => any, objectEquality?: boolean): Function; + + $watchCollection(watchExpression: string, listener: (newValue: T, oldValue: T, scope: IScope) => any): Function; + $watchCollection(watchExpression: (scope: IScope) => T, listener: (newValue: T, oldValue: T, scope: IScope) => any): Function; + + $watchGroup(watchExpressions: any[], listener: (newValue: any, oldValue: any, scope: IScope) => any): Function; + $watchGroup(watchExpressions: { (scope: IScope): any }[], listener: (newValue: any, oldValue: any, scope: IScope) => any): Function; + + $parent: IScope; + $root: IRootScopeService; + $id: number; + + // Hidden members + $$isolateBindings: any; + $$phase: any; + } + + interface IScope extends IRootScopeService { } + + /** + * $scope for ngRepeat directive. + * see https://docs.angularjs.org/api/ng/directive/ngRepeat + */ + interface IRepeatScope extends IScope { + + /** + * iterator offset of the repeated element (0..length-1). + */ + $index: number; + + /** + * true if the repeated element is first in the iterator. + */ + $first: boolean; + + /** + * true if the repeated element is between the first and last in the iterator. + */ + $middle: boolean; + + /** + * true if the repeated element is last in the iterator. 
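A sketch of the scope members above ($watch, $on, $broadcast) in a hypothetical controller:

    angular.module('myApp').controller('CounterCtrl', function($scope, $rootScope) {
      $scope.count = 0;

      // Listener receives (newValue, oldValue, scope); skip the initial call.
      $scope.$watch('count', function(newValue, oldValue) {
        if (newValue !== oldValue) {
          $rootScope.$broadcast('count-changed', newValue);
        }
      });

      // $on returns a deregistration function; handlers receive (event, ...args).
      var off = $scope.$on('count-changed', function(event, value) {
        console.log('count is now', value);
      });
      $scope.$on('$destroy', off);
    });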
+ */ + $last: boolean; + + /** + * true if the iterator position $index is even (otherwise false). + */ + $even: boolean; + + /** + * true if the iterator position $index is odd (otherwise false). + */ + $odd: boolean; + + } + + interface IAngularEvent { + /** + * the scope on which the event was $emit-ed or $broadcast-ed. + */ + targetScope: IScope; + /** + * the scope that is currently handling the event. Once the event propagates through the scope hierarchy, this property is set to null. + */ + currentScope: IScope; + /** + * name of the event. + */ + name: string; + /** + * calling stopPropagation function will cancel further event propagation (available only for events that were $emit-ed). + */ + stopPropagation?: Function; + /** + * calling preventDefault sets defaultPrevented flag to true. + */ + preventDefault: Function; + /** + * true if preventDefault was called. + */ + defaultPrevented: boolean; + } + + /////////////////////////////////////////////////////////////////////////// + // WindowService + // see http://docs.angularjs.org/api/ng.$window + /////////////////////////////////////////////////////////////////////////// + interface IWindowService extends Window { + [key: string]: any; + } + + /////////////////////////////////////////////////////////////////////////// + // BrowserService + // TODO undocumented, so we need to get it from the source code + /////////////////////////////////////////////////////////////////////////// + interface IBrowserService { + defer: angular.ITimeoutService; + [key: string]: any; + } + + /////////////////////////////////////////////////////////////////////////// + // TimeoutService + // see http://docs.angularjs.org/api/ng.$timeout + /////////////////////////////////////////////////////////////////////////// + interface ITimeoutService { + (delay?: number, invokeApply?: boolean): IPromise; + (fn: (...args: any[]) => T, delay?: number, invokeApply?: boolean, ...args: any[]): IPromise; + cancel(promise?: IPromise): boolean; + } + + /////////////////////////////////////////////////////////////////////////// + // IntervalService + // see http://docs.angularjs.org/api/ng.$interval + /////////////////////////////////////////////////////////////////////////// + interface IIntervalService { + (func: Function, delay: number, count?: number, invokeApply?: boolean): IPromise; + cancel(promise: IPromise): boolean; + } + + /////////////////////////////////////////////////////////////////////////// + // AngularProvider + // see http://docs.angularjs.org/api/ng/provider/$animateProvider + /////////////////////////////////////////////////////////////////////////// + interface IAnimateProvider { + /** + * Registers a new injectable animation factory function. + * + * @param name The name of the animation. + * @param factory The factory function that will be executed to return the animation object. + */ + register(name: string, factory: () => IAnimateCallbackObject): void; + + /** + * Gets and/or sets the CSS class expression that is checked when performing an animation. + * + * @param expression The className expression which will be checked against all animations. + * @returns The current CSS className expression value. If null then there is no expression value. + */ + classNameFilter(expression?: RegExp): RegExp; + } + + /** + * The animation object which contains callback functions for each event that is expected to be animated. 
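`$timeout` and `$interval` both return a promise that can be handed back to `cancel`; a sketch with hypothetical controller and property names:

    angular.module('myApp').controller('PollCtrl', function($scope, $timeout, $interval) {
      var greet = $timeout(function() { $scope.message = 'hello'; }, 500);
      var poll  = $interval(function() { $scope.ticks = ($scope.ticks || 0) + 1; }, 1000, 10);

      $scope.$on('$destroy', function() {
        // Cancel both so callbacks never fire on a destroyed scope.
        $timeout.cancel(greet);
        $interval.cancel(poll);
      });
    });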
+ */ + interface IAnimateCallbackObject { + eventFn(element: Node, doneFn: () => void): Function; + } + + /** + * $filter - $filterProvider - service in module ng + * + * Filters are used for formatting data displayed to the user. + * + * see https://docs.angularjs.org/api/ng/service/$filter + */ + interface IFilterService { + /** + * Usage: + * $filter(name); + * + * @param name Name of the filter function to retrieve + */ + (name: string): Function; + } + + /** + * $filterProvider - $filter - provider in module ng + * + * Filters are just functions which transform input to an output. However filters need to be Dependency Injected. To achieve this a filter definition consists of a factory function which is annotated with dependencies and is responsible for creating a filter function. + * + * see https://docs.angularjs.org/api/ng/provider/$filterProvider + */ + interface IFilterProvider extends IServiceProvider { + /** + * register(name); + * + * @param name Name of the filter function, or an object map of filters where the keys are the filter names and the values are the filter factories. Note: Filter names must be valid angular Expressions identifiers, such as uppercase or orderBy. Names with special characters, such as hyphens and dots, are not allowed. If you wish to namespace your filters, then you can use capitalization (myappSubsectionFilterx) or underscores (myapp_subsection_filterx). + */ + register(name: string | {}): IServiceProvider; + } + + /////////////////////////////////////////////////////////////////////////// + // LocaleService + // see http://docs.angularjs.org/api/ng.$locale + /////////////////////////////////////////////////////////////////////////// + interface ILocaleService { + id: string; + + // These are not documented + // Check angular's i18n files for exemples + NUMBER_FORMATS: ILocaleNumberFormatDescriptor; + DATETIME_FORMATS: ILocaleDateTimeFormatDescriptor; + pluralCat: (num: any) => string; + } + + interface ILocaleNumberFormatDescriptor { + DECIMAL_SEP: string; + GROUP_SEP: string; + PATTERNS: ILocaleNumberPatternDescriptor[]; + CURRENCY_SYM: string; + } + + interface ILocaleNumberPatternDescriptor { + minInt: number; + minFrac: number; + maxFrac: number; + posPre: string; + posSuf: string; + negPre: string; + negSuf: string; + gSize: number; + lgSize: number; + } + + interface ILocaleDateTimeFormatDescriptor { + MONTH: string[]; + SHORTMONTH: string[]; + DAY: string[]; + SHORTDAY: string[]; + AMPMS: string[]; + medium: string; + short: string; + fullDate: string; + longDate: string; + mediumDate: string; + shortDate: string; + mediumTime: string; + shortTime: string; + } + + /////////////////////////////////////////////////////////////////////////// + // LogService + // see http://docs.angularjs.org/api/ng.$log + // see http://docs.angularjs.org/api/ng.$logProvider + /////////////////////////////////////////////////////////////////////////// + interface ILogService { + debug: ILogCall; + error: ILogCall; + info: ILogCall; + log: ILogCall; + warn: ILogCall; + } + + interface ILogProvider extends IServiceProvider { + debugEnabled(): boolean; + debugEnabled(enabled: boolean): ILogProvider; + } + + // We define this as separate interface so we can reopen it later for + // the ngMock module. 
+ interface ILogCall { + (...args: any[]): void; + } + + /////////////////////////////////////////////////////////////////////////// + // ParseService + // see http://docs.angularjs.org/api/ng.$parse + // see http://docs.angularjs.org/api/ng.$parseProvider + /////////////////////////////////////////////////////////////////////////// + interface IParseService { + (expression: string): ICompiledExpression; + } + + interface IParseProvider { + logPromiseWarnings(): boolean; + logPromiseWarnings(value: boolean): IParseProvider; + + unwrapPromises(): boolean; + unwrapPromises(value: boolean): IParseProvider; + } + + interface ICompiledExpression { + (context: any, locals?: any): any; + + // If value is not provided, undefined is gonna be used since the implementation + // does not check the parameter. Let's force a value for consistency. If consumer + // whants to undefine it, pass the undefined value explicitly. + assign(context: any, value: any): any; + } + + /** + * $location - $locationProvider - service in module ng + * see https://docs.angularjs.org/api/ng/service/$location + */ + interface ILocationService { + absUrl(): string; + hash(): string; + hash(newHash: string): ILocationService; + host(): string; + + /** + * Return path of current url + */ + path(): string; + + /** + * Change path when called with parameter and return $location. + * Note: Path should always begin with forward slash (/), this method will add the forward slash if it is missing. + * + * @param path New path + */ + path(path: string): ILocationService; + + port(): number; + protocol(): string; + replace(): ILocationService; + + /** + * Return search part (as object) of current url + */ + search(): any; + + /** + * Change search part when called with parameter and return $location. + * + * @param search When called with a single argument the method acts as a setter, setting the search component of $location to the specified value. + * + * If the argument is a hash object containing an array of values, these values will be encoded as duplicate search parameters in the url. + */ + search(search: any): ILocationService; + + /** + * Change search part when called with parameter and return $location. + * + * @param search New search params + * @param paramValue If search is a string or a Number, then paramValue will override only a single search property. If paramValue is null, the property specified via the first argument will be deleted. If paramValue is an array, it will override the property of the search component of $location specified via the first argument. If paramValue is true, the property specified via the first argument will be added with no value nor trailing equal sign. 
+ */ + search(search: string, paramValue: string|number|string[]|boolean): ILocationService; + + state(): any; + state(state: any): ILocationService; + url(): string; + url(url: string): ILocationService; + } + + interface ILocationProvider extends IServiceProvider { + hashPrefix(): string; + hashPrefix(prefix: string): ILocationProvider; + html5Mode(): boolean; + + // Documentation states that parameter is string, but + // implementation tests it as boolean, which makes more sense + // since this is a toggler + html5Mode(active: boolean): ILocationProvider; + html5Mode(mode: { enabled?: boolean; requireBase?: boolean; rewriteLinks?: boolean; }): ILocationProvider; + } + + /////////////////////////////////////////////////////////////////////////// + // DocumentService + // see http://docs.angularjs.org/api/ng.$document + /////////////////////////////////////////////////////////////////////////// + interface IDocumentService extends IAugmentedJQuery {} + + /////////////////////////////////////////////////////////////////////////// + // ExceptionHandlerService + // see http://docs.angularjs.org/api/ng.$exceptionHandler + /////////////////////////////////////////////////////////////////////////// + interface IExceptionHandlerService { + (exception: Error, cause?: string): void; + } + + /////////////////////////////////////////////////////////////////////////// + // RootElementService + // see http://docs.angularjs.org/api/ng.$rootElement + /////////////////////////////////////////////////////////////////////////// + interface IRootElementService extends JQuery {} + + interface IQResolveReject { + (): void; + (value: T): void; + } + /** + * $q - service in module ng + * A promise/deferred implementation inspired by Kris Kowal's Q. + * See http://docs.angularjs.org/api/ng/service/$q + */ + interface IQService { + new (resolver: (resolve: IQResolveReject) => any): IPromise; + new (resolver: (resolve: IQResolveReject, reject: IQResolveReject) => any): IPromise; + (resolver: (resolve: IQResolveReject) => any): IPromise; + (resolver: (resolve: IQResolveReject, reject: IQResolveReject) => any): IPromise; + + /** + * Combines multiple promises into a single promise that is resolved when all of the input promises are resolved. + * + * Returns a single promise that will be resolved with an array of values, each value corresponding to the promise at the same index in the promises array. If any of the promises is resolved with a rejection, this resulting promise will be rejected with the same rejection value. + * + * @param promises An array of promises. + */ + all(promises: IPromise[]): IPromise; + /** + * Combines multiple promises into a single promise that is resolved when all of the input promises are resolved. + * + * Returns a single promise that will be resolved with a hash of values, each value corresponding to the promise at the same key in the promises hash. If any of the promises is resolved with a rejection, this resulting promise will be rejected with the same rejection value. + * + * @param promises A hash of promises. + */ + all(promises: { [id: string]: IPromise; }): IPromise<{ [id: string]: any; }>; + all(promises: { [id: string]: IPromise; }): IPromise; + /** + * Creates a Deferred object which represents a task which will finish in the future. + */ + defer(): IDeferred; + /** + * Creates a promise that is resolved as rejected with the specified reason. This api should be used to forward rejection in a chain of promises. 
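Before the `$q` members continue below, a sketch of the `ILocationService` getters and chained setters declared above; the paths and query keys are made up:

    angular.module('myApp').controller('NavCtrl', function($scope, $location) {
      $scope.currentPath = $location.path();   // e.g. "/dashboard"
      $scope.query       = $location.search(); // e.g. {from: "now-6h"}

      // Setters return ILocationService, so they chain.
      $scope.openHelp = function() {
        $location.path('/help').search({topic: 'panels'});
      };
    });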
If you are dealing with the last promise in a promise chain, you don't need to worry about it. + * + * When comparing deferreds/promises to the familiar behavior of try/catch/throw, think of reject as the throw keyword in JavaScript. This also means that if you "catch" an error via a promise error callback and you want to forward the error to the promise derived from the current promise, you have to "rethrow" the error by returning a rejection constructed via reject. + * + * @param reason Constant, message, exception or an object representing the rejection reason. + */ + reject(reason?: any): IPromise; + /** + * Wraps an object that might be a value or a (3rd party) then-able promise into a $q promise. This is useful when you are dealing with an object that might or might not be a promise, or if the promise comes from a source that can't be trusted. + * + * @param value Value or a promise + */ + when(value: IPromise|T): IPromise; + /** + * Wraps an object that might be a value or a (3rd party) then-able promise into a $q promise. This is useful when you are dealing with an object that might or might not be a promise, or if the promise comes from a source that can't be trusted. + * + * @param value Value or a promise + */ + when(): IPromise; + } + + interface IPromise { + /** + * Regardless of when the promise was or will be resolved or rejected, then calls one of the success or error callbacks asynchronously as soon as the result is available. The callbacks are called with a single argument: the result or rejection reason. Additionally, the notify callback may be called zero or more times to provide a progress indication, before the promise is resolved or rejected. + * The successCallBack may return IPromise for when a $q.reject() needs to be returned + * This method returns a new promise which is resolved or rejected via the return value of the successCallback, errorCallback. It also notifies via the return value of the notifyCallback method. The promise can not be resolved or rejected from the notifyCallback method. + */ + then(successCallback: (promiseValue: T) => IHttpPromise|IPromise|TResult|IPromise, errorCallback?: (reason: any) => any, notifyCallback?: (state: any) => any): IPromise; + + /** + * Shorthand for promise.then(null, errorCallback) + */ + catch(onRejected: (reason: any) => IHttpPromise|IPromise|TResult): IPromise; + + /** + * Allows you to observe either the fulfillment or rejection of a promise, but to do so without modifying the final value. This is useful to release resources or do some clean-up that needs to be done whether the promise was rejected or resolved. See the full specification for more information. + * + * Because finally is a reserved word in JavaScript and reserved keywords are not supported as property names by ES3, you'll need to invoke the method like promise['finally'](callback) to make your code IE8 and Android 2.x compatible. 
+ */ + finally(finallyCallback: () => any): IPromise; + } + + interface IDeferred { + resolve(value?: T): void; + reject(reason?: any): void; + notify(state?: any): void; + promise: IPromise; + } + + /////////////////////////////////////////////////////////////////////////// + // AnchorScrollService + // see http://docs.angularjs.org/api/ng.$anchorScroll + /////////////////////////////////////////////////////////////////////////// + interface IAnchorScrollService { + (): void; + (hash: string): void; + yOffset: any; + } + + interface IAnchorScrollProvider extends IServiceProvider { + disableAutoScrolling(): void; + } + + /** + * $cacheFactory - service in module ng + * + * Factory that constructs Cache objects and gives access to them. + * + * see https://docs.angularjs.org/api/ng/service/$cacheFactory + */ + interface ICacheFactoryService { + /** + * Factory that constructs Cache objects and gives access to them. + * + * @param cacheId Name or id of the newly created cache. + * @param optionsMap Options object that specifies the cache behavior. Properties: + * + * capacity — turns the cache into LRU cache. + */ + (cacheId: string, optionsMap?: { capacity?: number; }): ICacheObject; + + /** + * Get information about all the caches that have been created. + * @returns key-value map of cacheId to the result of calling cache#info + */ + info(): any; + + /** + * Get access to a cache object by the cacheId used when it was created. + * + * @param cacheId Name or id of a cache to access. + */ + get(cacheId: string): ICacheObject; + } + + /** + * $cacheFactory.Cache - type in module ng + * + * A cache object used to store and retrieve data, primarily used by $http and the script directive to cache templates and other data. + * + * see https://docs.angularjs.org/api/ng/type/$cacheFactory.Cache + */ + interface ICacheObject { + /** + * Retrieve information regarding a particular Cache. + */ + info(): { + /** + * the id of the cache instance + */ + id: string; + + /** + * the number of entries kept in the cache instance + */ + size: number; + + //...: any additional properties from the options object when creating the cache. + }; + + /** + * Inserts a named entry into the Cache object to be retrieved later, and incrementing the size of the cache if the key was not already present in the cache. If behaving like an LRU cache, it will also remove stale entries from the set. + * + * It will not insert undefined values into the cache. + * + * @param key the key under which the cached data is stored. + * @param value the value to store alongside the key. If it is undefined, the key will not be stored. + */ + put(key: string, value?: T): T; + + /** + * Retrieves named data stored in the Cache object. + * + * @param key the key of the data to be retrieved + */ + get(key: string): T; + + /** + * Removes an entry from the Cache object. + * + * @param key the key of the entry to be removed + */ + remove(key: string): void; + + /** + * Clears the cache object of any entries. + */ + removeAll(): void; + + /** + * Destroys the Cache object entirely, removing it from the $cacheFactory set. 
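A sketch of the `$q` / `IDeferred` / `IPromise` contract above, wrapped in a hypothetical `slowValue` factory:

    angular.module('myApp').factory('slowValue', function($q, $timeout) {
      return function(value) {
        var deferred = $q.defer();
        $timeout(function() {
          if (value !== undefined) { deferred.resolve(value); }
          else { deferred.reject('no value given'); }
        }, 250);
        return deferred.promise;
      };
    });

    // Consumption: then/catch/finally as declared on IPromise.
    // slowValue(21).then(function(v) { return v * 2; })
    //              .catch(function(err) { console.error(err); })
    //              .finally(function() { console.log('done'); });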
+ */ + destroy(): void; + } + + /////////////////////////////////////////////////////////////////////////// + // CompileService + // see http://docs.angularjs.org/api/ng.$compile + // see http://docs.angularjs.org/api/ng.$compileProvider + /////////////////////////////////////////////////////////////////////////// + interface ICompileService { + (element: string, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction; + (element: Element, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction; + (element: JQuery, transclude?: ITranscludeFunction, maxPriority?: number): ITemplateLinkingFunction; + } + + interface ICompileProvider extends IServiceProvider { + directive(name: string, directiveFactory: Function): ICompileProvider; + + // Undocumented, but it is there... + directive(directivesMap: any): ICompileProvider; + + aHrefSanitizationWhitelist(): RegExp; + aHrefSanitizationWhitelist(regexp: RegExp): ICompileProvider; + + imgSrcSanitizationWhitelist(): RegExp; + imgSrcSanitizationWhitelist(regexp: RegExp): ICompileProvider; + + debugInfoEnabled(enabled?: boolean): any; + } + + interface ICloneAttachFunction { + // Let's hint but not force cloneAttachFn's signature + (clonedElement?: JQuery, scope?: IScope): any; + } + + // This corresponds to the "publicLinkFn" returned by $compile. + interface ITemplateLinkingFunction { + (scope: IScope, cloneAttachFn?: ICloneAttachFunction): IAugmentedJQuery; + } + + // This corresponds to $transclude (and also the transclude function passed to link). + interface ITranscludeFunction { + // If the scope is provided, then the cloneAttachFn must be as well. + (scope: IScope, cloneAttachFn: ICloneAttachFunction): IAugmentedJQuery; + // If one argument is provided, then it's assumed to be the cloneAttachFn. + (cloneAttachFn?: ICloneAttachFunction): IAugmentedJQuery; + } + + /////////////////////////////////////////////////////////////////////////// + // ControllerService + // see http://docs.angularjs.org/api/ng.$controller + // see http://docs.angularjs.org/api/ng.$controllerProvider + /////////////////////////////////////////////////////////////////////////// + interface IControllerService { + // Although the documentation doesn't state this, locals are optional + (controllerConstructor: Function, locals?: any, bindToController?: any): any; + (controllerName: string, locals?: any, bindToController?: any): any; + } + + interface IControllerProvider extends IServiceProvider { + register(name: string, controllerConstructor: Function): void; + register(name: string, dependencyAnnotatedConstructor: any[]): void; + allowGlobals(): void; + } + + /** + * HttpService + * see http://docs.angularjs.org/api/ng/service/$http + */ + interface IHttpService { + /** + * Object describing the request to be made and how it should be processed. + */ + (config: IRequestConfig): IHttpPromise; + + /** + * Shortcut method to perform GET request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param config Optional configuration object + */ + get(url: string, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform DELETE request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param config Optional configuration object + */ + delete(url: string, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform HEAD request. 
+ * + * @param url Relative or absolute URL specifying the destination of the request + * @param config Optional configuration object + */ + head(url: string, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform JSONP request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param config Optional configuration object + */ + jsonp(url: string, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform POST request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param data Request content + * @param config Optional configuration object + */ + post(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform PUT request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param data Request content + * @param config Optional configuration object + */ + put(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Shortcut method to perform PATCH request. + * + * @param url Relative or absolute URL specifying the destination of the request + * @param data Request content + * @param config Optional configuration object + */ + patch(url: string, data: any, config?: IRequestShortcutConfig): IHttpPromise; + + /** + * Runtime equivalent of the $httpProvider.defaults property. Allows configuration of default headers, withCredentials as well as request and response transformations. + */ + defaults: IRequestConfig; + + /** + * Array of config objects for currently pending requests. This is primarily meant to be used for debugging purposes. + */ + pendingRequests: any[]; + } + + /** + * Object describing the request to be made and how it should be processed. + * see http://docs.angularjs.org/api/ng/service/$http#usage + */ + interface IRequestShortcutConfig { + /** + * {Object.} + * Map of strings or objects which will be turned to ?key1=value1&key2=value2 after the url. If the value is not a string, it will be JSONified. + */ + params?: any; + + /** + * Map of strings or functions which return strings representing HTTP headers to send to the server. If the return value of a function is null, the header will not be sent. + */ + headers?: any; + + /** + * Name of HTTP header to populate with the XSRF token. + */ + xsrfHeaderName?: string; + + /** + * Name of cookie containing the XSRF token. + */ + xsrfCookieName?: string; + + /** + * {boolean|Cache} + * If true, a default $http cache will be used to cache the GET request, otherwise if a cache instance built with $cacheFactory, this cache will be used for caching. + */ + cache?: any; + + /** + * whether to to set the withCredentials flag on the XHR object. See [requests with credentials]https://developer.mozilla.org/en/http_access_control#section_5 for more information. + */ + withCredentials?: boolean; + + /** + * {string|Object} + * Data to be sent as the request message data. + */ + data?: any; + + /** + * {function(data, headersGetter)|Array.} + * Transform function or an array of such functions. The transform function takes the http request body and headers and returns its transformed (typically serialized) version. + */ + transformRequest?: any; + + /** + * {function(data, headersGetter)|Array.} + * Transform function or an array of such functions. The transform function takes the http response body and headers and returns its transformed (typically deserialized) version. 
+ */ + transformResponse?: any; + + /** + * {number|Promise} + * Timeout in milliseconds, or promise that should abort the request when resolved. + */ + timeout?: any; + + /** + * See requestType. + */ + responseType?: string; + } + + /** + * Object describing the request to be made and how it should be processed. + * see http://docs.angularjs.org/api/ng/service/$http#usage + */ + interface IRequestConfig extends IRequestShortcutConfig { + /** + * HTTP method (e.g. 'GET', 'POST', etc) + */ + method: string; + /** + * Absolute or relative URL of the resource that is being requested. + */ + url: string; + } + + interface IHttpHeadersGetter { + (): { [name: string]: string; }; + (headerName: string): string; + } + + interface IHttpPromiseCallback { + (data: T, status: number, headers: IHttpHeadersGetter, config: IRequestConfig): void; + } + + interface IHttpPromiseCallbackArg { + data?: T; + status?: number; + headers?: IHttpHeadersGetter; + config?: IRequestConfig; + statusText?: string; + } + + interface IHttpPromise extends IPromise> { + success(callback: IHttpPromiseCallback): IHttpPromise; + error(callback: IHttpPromiseCallback): IHttpPromise; + then(successCallback: (response: IHttpPromiseCallbackArg) => IPromise|TResult, errorCallback?: (response: IHttpPromiseCallbackArg) => any): IPromise; + } + + /** + * Object that controls the defaults for $http provider + * https://docs.angularjs.org/api/ng/service/$http#defaults + */ + interface IHttpProviderDefaults { + cache?: boolean; + /** + * Transform function or an array of such functions. The transform function takes the http request body and + * headers and returns its transformed (typically serialized) version. + */ + transformRequest?: ((data: any, headersGetter?: any) => any)|((data: any, headersGetter?: any) => any)[]; + xsrfCookieName?: string; + xsrfHeaderName?: string; + withCredentials?: boolean; + headers?: { + common?: any; + post?: any; + put?: any; + patch?: any; + } + } + + interface IHttpProvider extends IServiceProvider { + defaults: IHttpProviderDefaults; + interceptors: any[]; + useApplyAsync(): boolean; + useApplyAsync(value: boolean): IHttpProvider; + + /** + * + * @param {boolean=} value If true, `$http` will return a normal promise without the `success` and `error` methods. + * @returns {boolean|Object} If a value is specified, returns the $httpProvider for chaining. + * otherwise, returns the current configured value. + */ + useLegacyPromiseExtensions(value:boolean) : boolean | IHttpProvider; + } + + /////////////////////////////////////////////////////////////////////////// + // HttpBackendService + // see http://docs.angularjs.org/api/ng.$httpBackend + // You should never need to use this service directly. 
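A sketch of the `$http` shortcut methods and config objects above; the URLs and the `dashboardApi` factory are purely illustrative:

    angular.module('myApp').factory('dashboardApi', function($http) {
      return {
        // GET with params; the promise resolves with {data, status, headers, config}.
        list: function() {
          return $http.get('/api/dashboards', {params: {limit: 10}})
            .then(function(response) { return response.data; });
        },
        save: function(dashboard) {
          return $http.post('/api/dashboards', dashboard, {
            headers: {'X-Requested-By': 'myApp'}
          });
        }
      };
    });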
+ /////////////////////////////////////////////////////////////////////////// + interface IHttpBackendService { + // XXX Perhaps define callback signature in the future + (method: string, url: string, post?: any, callback?: Function, headers?: any, timeout?: number, withCredentials?: boolean): void; + } + + /////////////////////////////////////////////////////////////////////////// + // InterpolateService + // see http://docs.angularjs.org/api/ng.$interpolate + // see http://docs.angularjs.org/api/ng.$interpolateProvider + /////////////////////////////////////////////////////////////////////////// + interface IInterpolateService { + (text: string, mustHaveExpression?: boolean, trustedContext?: string, allOrNothing?: boolean): IInterpolationFunction; + endSymbol(): string; + startSymbol(): string; + } + + interface IInterpolationFunction { + (context: any): string; + } + + interface IInterpolateProvider extends IServiceProvider { + startSymbol(): string; + startSymbol(value: string): IInterpolateProvider; + endSymbol(): string; + endSymbol(value: string): IInterpolateProvider; + } + + /////////////////////////////////////////////////////////////////////////// + // TemplateCacheService + // see http://docs.angularjs.org/api/ng.$templateCache + /////////////////////////////////////////////////////////////////////////// + interface ITemplateCacheService extends ICacheObject {} + + /////////////////////////////////////////////////////////////////////////// + // SCEService + // see http://docs.angularjs.org/api/ng.$sce + /////////////////////////////////////////////////////////////////////////// + interface ISCEService { + getTrusted(type: string, mayBeTrusted: any): any; + getTrustedCss(value: any): any; + getTrustedHtml(value: any): any; + getTrustedJs(value: any): any; + getTrustedResourceUrl(value: any): any; + getTrustedUrl(value: any): any; + parse(type: string, expression: string): (context: any, locals: any) => any; + parseAsCss(expression: string): (context: any, locals: any) => any; + parseAsHtml(expression: string): (context: any, locals: any) => any; + parseAsJs(expression: string): (context: any, locals: any) => any; + parseAsResourceUrl(expression: string): (context: any, locals: any) => any; + parseAsUrl(expression: string): (context: any, locals: any) => any; + trustAs(type: string, value: any): any; + trustAsHtml(value: any): any; + trustAsJs(value: any): any; + trustAsResourceUrl(value: any): any; + trustAsUrl(value: any): any; + isEnabled(): boolean; + } + + /////////////////////////////////////////////////////////////////////////// + // SCEProvider + // see http://docs.angularjs.org/api/ng.$sceProvider + /////////////////////////////////////////////////////////////////////////// + interface ISCEProvider extends IServiceProvider { + enabled(value: boolean): void; + } + + /////////////////////////////////////////////////////////////////////////// + // SCEDelegateService + // see http://docs.angularjs.org/api/ng.$sceDelegate + /////////////////////////////////////////////////////////////////////////// + interface ISCEDelegateService { + getTrusted(type: string, mayBeTrusted: any): any; + trustAs(type: string, value: any): any; + valueOf(value: any): any; + } + + + /////////////////////////////////////////////////////////////////////////// + // SCEDelegateProvider + // see http://docs.angularjs.org/api/ng.$sceDelegateProvider + /////////////////////////////////////////////////////////////////////////// + interface ISCEDelegateProvider extends IServiceProvider { + 
resourceUrlBlacklist(blacklist: any[]): void; + resourceUrlWhitelist(whitelist: any[]): void; + resourceUrlBlacklist(): any[]; + resourceUrlWhitelist(): any[]; + } + + /** + * $templateRequest service + * see http://docs.angularjs.org/api/ng/service/$templateRequest + */ + interface ITemplateRequestService { + /** + * Downloads a template using $http and, upon success, stores the + * contents inside of $templateCache. + * + * If the HTTP request fails or the response data of the HTTP request is + * empty then a $compile error will be thrown (unless + * {ignoreRequestError} is set to true). + * + * @param tpl The template URL. + * @param ignoreRequestError Whether or not to ignore the exception + * when the request fails or the template is + * empty. + * + * @return A promise whose value is the template content. + */ + (tpl: string, ignoreRequestError?: boolean): IPromise; + /** + * total amount of pending template requests being downloaded. + * @type {number} + */ + totalPendingRequests: number; + } + + /////////////////////////////////////////////////////////////////////////// + // Directive + // see http://docs.angularjs.org/api/ng.$compileProvider#directive + // and http://docs.angularjs.org/guide/directive + /////////////////////////////////////////////////////////////////////////// + + interface IDirectiveFactory { + (...args: any[]): IDirective; + } + + interface IDirectiveLinkFn { + ( + scope: IScope, + instanceElement: IAugmentedJQuery, + instanceAttributes: IAttributes, + controller: {}, + transclude: ITranscludeFunction + ): void; + } + + interface IDirectivePrePost { + pre?: IDirectiveLinkFn; + post?: IDirectiveLinkFn; + } + + interface IDirectiveCompileFn { + ( + templateElement: IAugmentedJQuery, + templateAttributes: IAttributes, + transclude: ITranscludeFunction + ): IDirectivePrePost; + } + + interface IDirective { + compile?: IDirectiveCompileFn; + controller?: any; + controllerAs?: string; + bindToController?: boolean|Object; + link?: IDirectiveLinkFn | IDirectivePrePost; + name?: string; + priority?: number; + replace?: boolean; + require?: any; + restrict?: string; + scope?: any; + template?: any; + templateUrl?: any; + terminal?: boolean; + transclude?: any; + } + + /** + * angular.element + * when calling angular.element, angular returns a jQuery object, + * augmented with additional methods like e.g. scope. + * see: http://docs.angularjs.org/api/angular.element + */ + interface IAugmentedJQueryStatic extends JQueryStatic { + (selector: string, context?: any): IAugmentedJQuery; + (element: Element): IAugmentedJQuery; + (object: {}): IAugmentedJQuery; + (elementArray: Element[]): IAugmentedJQuery; + (object: JQuery): IAugmentedJQuery; + (func: Function): IAugmentedJQuery; + (array: any[]): IAugmentedJQuery; + (): IAugmentedJQuery; + } + + interface IAugmentedJQuery extends JQuery { + // TODO: events, how to define? 
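The directive interfaces above (`IDirectiveFactory`, `IDirectiveLinkFn`, `IDirective`) map onto the usual directive-definition object. A minimal sketch; the directive name, template, and module name are invented for illustration:

```ts
function panelTitleDirective(): ng.IDirective {
  return {
    restrict: 'E',                      // element directive
    scope: { title: '@' },              // isolate scope binding
    template: '<h2 class="panel-title">{{title}}</h2>',
    link: (scope, elem) => {            // IDirectiveLinkFn (unused params omitted)
      elem.on('click', () => console.log('panel title clicked'));
    },
  };
}
angular.module('app').directive('panelTitle', panelTitleDirective);
```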
+ //$destroy + + find(selector: string): IAugmentedJQuery; + find(element: any): IAugmentedJQuery; + find(obj: JQuery): IAugmentedJQuery; + controller(): any; + controller(name: string): any; + injector(): any; + scope(): IScope; + isolateScope(): IScope; + + inheritedData(key: string, value: any): JQuery; + inheritedData(obj: { [key: string]: any; }): JQuery; + inheritedData(key?: string): any; + } + + /////////////////////////////////////////////////////////////////////// + // AnimateService + // see http://docs.angularjs.org/api/ng.$animate + /////////////////////////////////////////////////////////////////////// + interface IAnimateService { + addClass(element: JQuery, className: string, done?: Function): IPromise; + enter(element: JQuery, parent: JQuery, after: JQuery, done?: Function): void; + leave(element: JQuery, done?: Function): void; + move(element: JQuery, parent: JQuery, after: JQuery, done?: Function): void; + removeClass(element: JQuery, className: string, done?: Function): void; + } + + /////////////////////////////////////////////////////////////////////////// + // AUTO module (angular.js) + /////////////////////////////////////////////////////////////////////////// + export module auto { + + /////////////////////////////////////////////////////////////////////// + // InjectorService + // see http://docs.angularjs.org/api/AUTO.$injector + /////////////////////////////////////////////////////////////////////// + interface IInjectorService { + annotate(fn: Function): string[]; + annotate(inlineAnnotatedFunction: any[]): string[]; + get(name: string): T; + has(name: string): boolean; + instantiate(typeConstructor: Function, locals?: any): T; + invoke(inlineAnnotatedFunction: any[]): any; + invoke(func: Function, context?: any, locals?: any): any; + } + + /////////////////////////////////////////////////////////////////////// + // ProvideService + // see http://docs.angularjs.org/api/AUTO.$provide + /////////////////////////////////////////////////////////////////////// + interface IProvideService { + // Documentation says it returns the registered instance, but actual + // implementation does not return anything. + // constant(name: string, value: any): any; + /** + * Register a constant service, such as a string, a number, an array, an object or a function, with the $injector. Unlike value it can be injected into a module configuration function (see config) and it cannot be overridden by an Angular decorator. + * + * @param name The name of the constant. + * @param value The constant value. + */ + constant(name: string, value: any): void; + + /** + * Register a service decorator with the $injector. A service decorator intercepts the creation of a service, allowing it to override or modify the behaviour of the service. The object returned by the decorator may be the original service, or a new service object which replaces or wraps and delegates to the original service. + * + * @param name The name of the service to decorate. + * @param decorator This function will be invoked when the service needs to be instantiated and should return the decorated service instance. The function is called using the injector.invoke method and is therefore fully injectable. Local injection arguments: + * + * $delegate - The original service instance, which can be monkey patched, configured, decorated or delegated to. + */ + decorator(name: string, decorator: Function): void; + /** + * Register a service decorator with the $injector. 
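`$provide.decorator`, as typed above, is typically called from a module `config` block. A hedged sketch that wraps `$log.error`; the tag added to the message is made up:

```ts
angular.module('app').config(['$provide', ($provide: ng.auto.IProvideService) => {
  // The inline array annotation matches the decorator(name, inlineAnnotatedFunction) overload.
  $provide.decorator('$log', ['$delegate', ($delegate: any) => {
    const originalError = $delegate.error;
    $delegate.error = (...args: any[]) => {
      originalError.apply($delegate, ['[grafana]', ...args]);
    };
    return $delegate;   // a decorator must return the (possibly wrapped) service
  }]);
}]);
```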
A service decorator intercepts the creation of a service, allowing it to override or modify the behaviour of the service. The object returned by the decorator may be the original service, or a new service object which replaces or wraps and delegates to the original service. + * + * @param name The name of the service to decorate. + * @param inlineAnnotatedFunction This function will be invoked when the service needs to be instantiated and should return the decorated service instance. The function is called using the injector.invoke method and is therefore fully injectable. Local injection arguments: + * + * $delegate - The original service instance, which can be monkey patched, configured, decorated or delegated to. + */ + decorator(name: string, inlineAnnotatedFunction: any[]): void; + factory(name: string, serviceFactoryFunction: Function): IServiceProvider; + factory(name: string, inlineAnnotatedFunction: any[]): IServiceProvider; + provider(name: string, provider: IServiceProvider): IServiceProvider; + provider(name: string, serviceProviderConstructor: Function): IServiceProvider; + service(name: string, constructor: Function): IServiceProvider; + value(name: string, value: any): IServiceProvider; + } + + } +} diff --git a/public/app/headers/common.d.ts b/public/app/headers/common.d.ts new file mode 100644 index 0000000000000..5f97278274fc6 --- /dev/null +++ b/public/app/headers/common.d.ts @@ -0,0 +1,17 @@ +/// +/// +/// +/// + +// dummy modules +declare module 'app/core/config' { + var config : any; + export = config; +} + +declare module 'app/core/utils/kbn' { + var kbn : any; + export = kbn; +} + + diff --git a/public/app/headers/jquery/jquery.d.ts b/public/app/headers/jquery/jquery.d.ts new file mode 100644 index 0000000000000..70737253efda6 --- /dev/null +++ b/public/app/headers/jquery/jquery.d.ts @@ -0,0 +1,3178 @@ +// Type definitions for jQuery 1.10.x / 2.0.x +// Project: http://jquery.com/ +// Definitions by: Boris Yankov , Christian Hoffmeister , Steve Fenton , Diullei Gomes , Tass Iliopoulos , Jason Swearingen , Sean Hill , Guus Goossens , Kelly Summerlin , Basarat Ali Syed , Nicholas Wolverson , Derek Cicerone , Andrew Gaspar , James Harrison Fisher , Seikichi Kondo , Benjamin Jackman , Poul Sorensen , Josh Strobl , John Reilly , Dick van den Brink +// Definitions: https://github.com/borisyankov/DefinitelyTyped + +/* ***************************************************************************** +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** */ + + +/** + * Interface for the AJAX setting that will configure the AJAX request + */ +interface JQueryAjaxSettings { + /** + * The content type sent in the request header that tells the server what kind of response it will accept in return. If the accepts setting needs modification, it is recommended to do so once in the $.ajaxSetup() method. 
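The remaining registration helpers (`factory`, `service`, `value`, `constant`) have the same shape whether called on `$provide` or through the module API that mirrors it. An illustrative sketch with made-up service names:

```ts
angular.module('app')
  .constant('BUILD_INFO', { version: '2.6.0-pre' })    // available at config time
  .value('defaultTheme', 'dark')                        // plain injectable value
  .factory('intervalUtil', () => ({
    round: (ms: number) => Math.round(ms / 1000) * 1000,
  }))
  .service('TimeSrv', function TimeSrv() {              // instantiated with `new`
    this.now = () => new Date();
  });
```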
+ */ + accepts?: any; + /** + * By default, all requests are sent asynchronously (i.e. this is set to true by default). If you need synchronous requests, set this option to false. Cross-domain requests and dataType: "jsonp" requests do not support synchronous operation. Note that synchronous requests may temporarily lock the browser, disabling any actions while the request is active. As of jQuery 1.8, the use of async: false with jqXHR ($.Deferred) is deprecated; you must use the success/error/complete callback options instead of the corresponding methods of the jqXHR object such as jqXHR.done() or the deprecated jqXHR.success(). + */ + async?: boolean; + /** + * A pre-request callback function that can be used to modify the jqXHR (in jQuery 1.4.x, XMLHTTPRequest) object before it is sent. Use this to set custom headers, etc. The jqXHR and settings objects are passed as arguments. This is an Ajax Event. Returning false in the beforeSend function will cancel the request. As of jQuery 1.5, the beforeSend option will be called regardless of the type of request. + */ + beforeSend? (jqXHR: JQueryXHR, settings: JQueryAjaxSettings): any; + /** + * If set to false, it will force requested pages not to be cached by the browser. Note: Setting cache to false will only work correctly with HEAD and GET requests. It works by appending "_={timestamp}" to the GET parameters. The parameter is not needed for other types of requests, except in IE8 when a POST is made to a URL that has already been requested by a GET. + */ + cache?: boolean; + /** + * A function to be called when the request finishes (after success and error callbacks are executed). The function gets passed two arguments: The jqXHR (in jQuery 1.4.x, XMLHTTPRequest) object and a string categorizing the status of the request ("success", "notmodified", "error", "timeout", "abort", or "parsererror"). As of jQuery 1.5, the complete setting can accept an array of functions. Each function will be called in turn. This is an Ajax Event. + */ + complete? (jqXHR: JQueryXHR, textStatus: string): any; + /** + * An object of string/regular-expression pairs that determine how jQuery will parse the response, given its content type. (version added: 1.5) + */ + contents?: { [key: string]: any; }; + //According to jQuery.ajax source code, ajax's option actually allows contentType to set to "false" + // https://github.com/borisyankov/DefinitelyTyped/issues/742 + /** + * When sending data to the server, use this content type. Default is "application/x-www-form-urlencoded; charset=UTF-8", which is fine for most cases. If you explicitly pass in a content-type to $.ajax(), then it is always sent to the server (even if no data is sent). The W3C XMLHttpRequest specification dictates that the charset is always UTF-8; specifying another charset will not force the browser to change the encoding. + */ + contentType?: any; + /** + * This object will be made the context of all Ajax-related callbacks. By default, the context is an object that represents the ajax settings used in the call ($.ajaxSettings merged with the settings passed to $.ajax). + */ + context?: any; + /** + * An object containing dataType-to-dataType converters. Each converter's value is a function that returns the transformed value of the response. (version added: 1.5) + */ + converters?: { [key: string]: any; }; + /** + * If you wish to force a crossDomain request (such as JSONP) on the same domain, set the value of crossDomain to true. 
This allows, for example, server-side redirection to another domain. (version added: 1.5) + */ + crossDomain?: boolean; + /** + * Data to be sent to the server. It is converted to a query string, if not already a string. It's appended to the url for GET-requests. See processData option to prevent this automatic processing. Object must be Key/Value pairs. If value is an Array, jQuery serializes multiple values with same key based on the value of the traditional setting (described below). + */ + data?: any; + /** + * A function to be used to handle the raw response data of XMLHttpRequest.This is a pre-filtering function to sanitize the response. You should return the sanitized data. The function accepts two arguments: The raw data returned from the server and the 'dataType' parameter. + */ + dataFilter? (data: any, ty: any): any; + /** + * The type of data that you're expecting back from the server. If none is specified, jQuery will try to infer it based on the MIME type of the response (an XML MIME type will yield XML, in 1.4 JSON will yield a JavaScript object, in 1.4 script will execute the script, and anything else will be returned as a string). + */ + dataType?: string; + /** + * A function to be called if the request fails. The function receives three arguments: The jqXHR (in jQuery 1.4.x, XMLHttpRequest) object, a string describing the type of error that occurred and an optional exception object, if one occurred. Possible values for the second argument (besides null) are "timeout", "error", "abort", and "parsererror". When an HTTP error occurs, errorThrown receives the textual portion of the HTTP status, such as "Not Found" or "Internal Server Error." As of jQuery 1.5, the error setting can accept an array of functions. Each function will be called in turn. Note: This handler is not called for cross-domain script and cross-domain JSONP requests. This is an Ajax Event. + */ + error? (jqXHR: JQueryXHR, textStatus: string, errorThrown: string): any; + /** + * Whether to trigger global Ajax event handlers for this request. The default is true. Set to false to prevent the global handlers like ajaxStart or ajaxStop from being triggered. This can be used to control various Ajax Events. + */ + global?: boolean; + /** + * An object of additional header key/value pairs to send along with requests using the XMLHttpRequest transport. The header X-Requested-With: XMLHttpRequest is always added, but its default XMLHttpRequest value can be changed here. Values in the headers setting can also be overwritten from within the beforeSend function. (version added: 1.5) + */ + headers?: { [key: string]: any; }; + /** + * Allow the request to be successful only if the response has changed since the last request. This is done by checking the Last-Modified header. Default value is false, ignoring the header. In jQuery 1.4 this technique also checks the 'etag' specified by the server to catch unmodified data. + */ + ifModified?: boolean; + /** + * Allow the current environment to be recognized as "local," (e.g. the filesystem), even if jQuery does not recognize it as such by default. The following protocols are currently recognized as local: file, *-extension, and widget. If the isLocal setting needs modification, it is recommended to do so once in the $.ajaxSetup() method. (version added: 1.5.1) + */ + isLocal?: boolean; + /** + * Override the callback function name in a jsonp request. This value will be used instead of 'callback' in the 'callback=?' part of the query string in the url. 
So {jsonp:'onJSONPLoad'} would result in 'onJSONPLoad=?' passed to the server. As of jQuery 1.5, setting the jsonp option to false prevents jQuery from adding the "?callback" string to the URL or attempting to use "=?" for transformation. In this case, you should also explicitly set the jsonpCallback setting. For example, { jsonp: false, jsonpCallback: "callbackName" } + */ + jsonp?: any; + /** + * Specify the callback function name for a JSONP request. This value will be used instead of the random name automatically generated by jQuery. It is preferable to let jQuery generate a unique name as it'll make it easier to manage the requests and provide callbacks and error handling. You may want to specify the callback when you want to enable better browser caching of GET requests. As of jQuery 1.5, you can also use a function for this setting, in which case the value of jsonpCallback is set to the return value of that function. + */ + jsonpCallback?: any; + /** + * The HTTP method to use for the request (e.g. "POST", "GET", "PUT"). (version added: 1.9.0) + */ + method?: string; + /** + * A mime type to override the XHR mime type. (version added: 1.5.1) + */ + mimeType?: string; + /** + * A password to be used with XMLHttpRequest in response to an HTTP access authentication request. + */ + password?: string; + /** + * By default, data passed in to the data option as an object (technically, anything other than a string) will be processed and transformed into a query string, fitting to the default content-type "application/x-www-form-urlencoded". If you want to send a DOMDocument, or other non-processed data, set this option to false. + */ + processData?: boolean; + /** + * Only applies when the "script" transport is used (e.g., cross-domain requests with "jsonp" or "script" dataType and "GET" type). Sets the charset attribute on the script tag used in the request. Used when the character set on the local page is not the same as the one on the remote script. + */ + scriptCharset?: string; + /** + * An object of numeric HTTP codes and functions to be called when the response has the corresponding code. f the request is successful, the status code functions take the same parameters as the success callback; if it results in an error (including 3xx redirect), they take the same parameters as the error callback. (version added: 1.5) + */ + statusCode?: { [key: string]: any; }; + /** + * A function to be called if the request succeeds. The function gets passed three arguments: The data returned from the server, formatted according to the dataType parameter; a string describing the status; and the jqXHR (in jQuery 1.4.x, XMLHttpRequest) object. As of jQuery 1.5, the success setting can accept an array of functions. Each function will be called in turn. This is an Ajax Event. + */ + success? (data: any, textStatus: string, jqXHR: JQueryXHR): any; + /** + * Set a timeout (in milliseconds) for the request. This will override any global timeout set with $.ajaxSetup(). The timeout period starts at the point the $.ajax call is made; if several other requests are in progress and the browser has no connections available, it is possible for a request to time out before it can be sent. In jQuery 1.4.x and below, the XMLHttpRequest object will be in an invalid state if the request times out; accessing any object members may throw an exception. In Firefox 3.0+ only, script and JSONP requests cannot be cancelled by a timeout; the script will run even if it arrives after the timeout period. 
+ */ + timeout?: number; + /** + * Set this to true if you wish to use the traditional style of param serialization. + */ + traditional?: boolean; + /** + * The type of request to make ("POST" or "GET"), default is "GET". Note: Other HTTP request methods, such as PUT and DELETE, can also be used here, but they are not supported by all browsers. + */ + type?: string; + /** + * A string containing the URL to which the request is sent. + */ + url?: string; + /** + * A username to be used with XMLHttpRequest in response to an HTTP access authentication request. + */ + username?: string; + /** + * Callback for creating the XMLHttpRequest object. Defaults to the ActiveXObject when available (IE), the XMLHttpRequest otherwise. Override to provide your own implementation for XMLHttpRequest or enhancements to the factory. + */ + xhr?: any; + /** + * An object of fieldName-fieldValue pairs to set on the native XHR object. For example, you can use it to set withCredentials to true for cross-domain requests if needed. In jQuery 1.5, the withCredentials property was not propagated to the native XHR and thus CORS requests requiring it would ignore this flag. For this reason, we recommend using jQuery 1.5.1+ should you require the use of it. (version added: 1.5.1) + */ + xhrFields?: { [key: string]: any; }; +} + +/** + * Interface for the jqXHR object + */ +interface JQueryXHR extends XMLHttpRequest, JQueryPromise { + /** + * The .overrideMimeType() method may be used in the beforeSend() callback function, for example, to modify the response content-type header. As of jQuery 1.5.1, the jqXHR object also contains the overrideMimeType() method (it was available in jQuery 1.4.x, as well, but was temporarily removed in jQuery 1.5). + */ + overrideMimeType(mimeType: string): any; + /** + * Cancel the request. + * + * @param statusText A string passed as the textStatus parameter for the done callback. Default value: "canceled" + */ + abort(statusText?: string): void; + /** + * Incorporates the functionality of the .done() and .fail() methods, allowing (as of jQuery 1.8) the underlying Promise to be manipulated. Refer to deferred.then() for implementation details. + */ + then(doneCallback: (data: any, textStatus: string, jqXHR: JQueryXHR) => void, failCallback?: (jqXHR: JQueryXHR, textStatus: string, errorThrown: any) => void): JQueryPromise; + /** + * Property containing the parsed response if the response Content-Type is json + */ + responseJSON?: any; + /** + * A function to be called if the request fails. + */ + error(xhr: JQueryXHR, textStatus: string, errorThrown: string): void; +} + +/** + * Interface for the JQuery callback + */ +interface JQueryCallback { + /** + * Add a callback or a collection of callbacks to a callback list. + * + * @param callbacks A function, or array of functions, that are to be added to the callback list. + */ + add(callbacks: Function): JQueryCallback; + /** + * Add a callback or a collection of callbacks to a callback list. + * + * @param callbacks A function, or array of functions, that are to be added to the callback list. + */ + add(callbacks: Function[]): JQueryCallback; + + /** + * Disable a callback list from doing anything more. + */ + disable(): JQueryCallback; + + /** + * Determine if the callbacks list has been disabled. + */ + disabled(): boolean; + + /** + * Remove all of the callbacks from a list. 
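Putting `JQueryAjaxSettings` and the `JQueryXHR` promise together, a hedged usage sketch (URL and payload are placeholders):

```ts
const xhr: JQueryXHR = $.ajax({
  url: '/api/dashboards/db',
  method: 'POST',
  contentType: 'application/json; charset=UTF-8',
  data: JSON.stringify({ title: 'My dashboard' }),
  dataType: 'json',
  timeout: 10000,                       // milliseconds, overrides $.ajaxSetup()
});

xhr
  .done((data, textStatus, jqXHR) => console.log('saved, HTTP ' + jqXHR.status))
  .fail((jqXHR, textStatus, errorThrown) => console.error(textStatus, errorThrown));
```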
+ */ + empty(): JQueryCallback; + + /** + * Call all of the callbacks with the given arguments + * + * @param arguments The argument or list of arguments to pass back to the callback list. + */ + fire(...arguments: any[]): JQueryCallback; + + /** + * Determine if the callbacks have already been called at least once. + */ + fired(): boolean; + + /** + * Call all callbacks in a list with the given context and arguments. + * + * @param context A reference to the context in which the callbacks in the list should be fired. + * @param arguments An argument, or array of arguments, to pass to the callbacks in the list. + */ + fireWith(context?: any, ...args: any[]): JQueryCallback; + + /** + * Determine whether a supplied callback is in a list + * + * @param callback The callback to search for. + */ + has(callback: Function): boolean; + + /** + * Lock a callback list in its current state. + */ + lock(): JQueryCallback; + + /** + * Determine if the callbacks list has been locked. + */ + locked(): boolean; + + /** + * Remove a callback or a collection of callbacks from a callback list. + * + * @param callbacks A function, or array of functions, that are to be removed from the callback list. + */ + remove(callbacks: Function): JQueryCallback; + /** + * Remove a callback or a collection of callbacks from a callback list. + * + * @param callbacks A function, or array of functions, that are to be removed from the callback list. + */ + remove(callbacks: Function[]): JQueryCallback; +} + +/** + * Allows jQuery Promises to interop with non-jQuery promises + */ +interface JQueryGenericPromise { + /** + * Add handlers to be called when the Deferred object is resolved, rejected, or still in progress. + * + * @param doneFilter A function that is called when the Deferred is resolved. + * @param failFilter An optional function that is called when the Deferred is rejected. + */ + then(doneFilter: (value?: T, ...values: any[]) => U|JQueryPromise, failFilter?: (...reasons: any[]) => any, progressFilter?: (...progression: any[]) => any): JQueryPromise; + + /** + * Add handlers to be called when the Deferred object is resolved, rejected, or still in progress. + * + * @param doneFilter A function that is called when the Deferred is resolved. + * @param failFilter An optional function that is called when the Deferred is rejected. + */ + then(doneFilter: (value?: T, ...values: any[]) => void, failFilter?: (...reasons: any[]) => any, progressFilter?: (...progression: any[]) => any): JQueryPromise; +} + +/** + * Interface for the JQuery promise/deferred callbacks + */ +interface JQueryPromiseCallback { + (value?: T, ...args: any[]): void; +} + +interface JQueryPromiseOperator { + (callback1: JQueryPromiseCallback|JQueryPromiseCallback[], ...callbacksN: Array|JQueryPromiseCallback[]>): JQueryPromise; +} + +/** + * Interface for the JQuery promise, part of callbacks + */ +interface JQueryPromise extends JQueryGenericPromise { + /** + * Determine the current state of a Deferred object. + */ + state(): string; + /** + * Add handlers to be called when the Deferred object is either resolved or rejected. + * + * @param alwaysCallbacks1 A function, or array of functions, that is called when the Deferred is resolved or rejected. + * @param alwaysCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is resolved or rejected. 
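`JQueryCallback` lists are created with `$.Callbacks` (typed further down in this file). A small sketch; the flags and handler are illustrative:

```ts
const refreshCallbacks: JQueryCallback = $.Callbacks('unique memory');
const onRefresh = (range: string) => console.log('refresh for range', range);

refreshCallbacks.add(onRefresh);
refreshCallbacks.fire('now-6h');          // invokes every registered handler
console.log(refreshCallbacks.fired());    // true: fired at least once
refreshCallbacks.remove(onRefresh);
refreshCallbacks.lock();                  // freeze the list in its current state
```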
+ */ + always(alwaysCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...alwaysCallbacksN: Array|JQueryPromiseCallback[]>): JQueryPromise; + /** + * Add handlers to be called when the Deferred object is resolved. + * + * @param doneCallbacks1 A function, or array of functions, that are called when the Deferred is resolved. + * @param doneCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is resolved. + */ + done(doneCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...doneCallbackN: Array|JQueryPromiseCallback[]>): JQueryPromise; + /** + * Add handlers to be called when the Deferred object is rejected. + * + * @param failCallbacks1 A function, or array of functions, that are called when the Deferred is rejected. + * @param failCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is rejected. + */ + fail(failCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...failCallbacksN: Array|JQueryPromiseCallback[]>): JQueryPromise; + /** + * Add handlers to be called when the Deferred object generates progress notifications. + * + * @param progressCallbacks A function, or array of functions, to be called when the Deferred generates progress notifications. + */ + progress(progressCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...progressCallbackN: Array|JQueryPromiseCallback[]>): JQueryPromise; + + // Deprecated - given no typings + pipe(doneFilter?: (x: any) => any, failFilter?: (x: any) => any, progressFilter?: (x: any) => any): JQueryPromise; +} + +/** + * Interface for the JQuery deferred, part of callbacks + */ +interface JQueryDeferred extends JQueryGenericPromise { + /** + * Determine the current state of a Deferred object. + */ + state(): string; + /** + * Add handlers to be called when the Deferred object is either resolved or rejected. + * + * @param alwaysCallbacks1 A function, or array of functions, that is called when the Deferred is resolved or rejected. + * @param alwaysCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is resolved or rejected. + */ + always(alwaysCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...alwaysCallbacksN: Array|JQueryPromiseCallback[]>): JQueryDeferred; + /** + * Add handlers to be called when the Deferred object is resolved. + * + * @param doneCallbacks1 A function, or array of functions, that are called when the Deferred is resolved. + * @param doneCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is resolved. + */ + done(doneCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...doneCallbackN: Array|JQueryPromiseCallback[]>): JQueryDeferred; + /** + * Add handlers to be called when the Deferred object is rejected. + * + * @param failCallbacks1 A function, or array of functions, that are called when the Deferred is rejected. + * @param failCallbacks2 Optional additional functions, or arrays of functions, that are called when the Deferred is rejected. + */ + fail(failCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...failCallbacksN: Array|JQueryPromiseCallback[]>): JQueryDeferred; + /** + * Add handlers to be called when the Deferred object generates progress notifications. + * + * @param progressCallbacks A function, or array of functions, to be called when the Deferred generates progress notifications. 
+ */ + progress(progressCallback1?: JQueryPromiseCallback|JQueryPromiseCallback[], ...progressCallbackN: Array|JQueryPromiseCallback[]>): JQueryDeferred; + + /** + * Call the progressCallbacks on a Deferred object with the given args. + * + * @param args Optional arguments that are passed to the progressCallbacks. + */ + notify(value?: any, ...args: any[]): JQueryDeferred; + + /** + * Call the progressCallbacks on a Deferred object with the given context and args. + * + * @param context Context passed to the progressCallbacks as the this object. + * @param args Optional arguments that are passed to the progressCallbacks. + */ + notifyWith(context: any, value?: any, ...args: any[]): JQueryDeferred; + + /** + * Reject a Deferred object and call any failCallbacks with the given args. + * + * @param args Optional arguments that are passed to the failCallbacks. + */ + reject(value?: any, ...args: any[]): JQueryDeferred; + /** + * Reject a Deferred object and call any failCallbacks with the given context and args. + * + * @param context Context passed to the failCallbacks as the this object. + * @param args An optional array of arguments that are passed to the failCallbacks. + */ + rejectWith(context: any, value?: any, ...args: any[]): JQueryDeferred; + + /** + * Resolve a Deferred object and call any doneCallbacks with the given args. + * + * @param value First argument passed to doneCallbacks. + * @param args Optional subsequent arguments that are passed to the doneCallbacks. + */ + resolve(value?: T, ...args: any[]): JQueryDeferred; + + /** + * Resolve a Deferred object and call any doneCallbacks with the given context and args. + * + * @param context Context passed to the doneCallbacks as the this object. + * @param args An optional array of arguments that are passed to the doneCallbacks. + */ + resolveWith(context: any, value?: T, ...args: any[]): JQueryDeferred; + + /** + * Return a Deferred's Promise object. 
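The producer side (`JQueryDeferred`) and consumer side (`JQueryPromise`) are easiest to see together. A hedged sketch; the simulated work and payload are invented:

```ts
function loadSnapshot() {
  const dfd = $.Deferred();                           // JQueryDeferred
  setTimeout(() => dfd.notify(50), 100);              // -> progress callbacks
  setTimeout(() => dfd.resolve({ ok: true }), 200);   // -> done callbacks
  return dfd.promise();                               // expose only the promise side
}

loadSnapshot()
  .progress((pct) => console.log(pct + '%'))
  .done((result) => console.log('loaded', result))
  .fail((...reasons: any[]) => console.error('failed', reasons));
```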
+ * + * @param target Object onto which the promise methods have to be attached + */ + promise(target?: any): JQueryPromise; + + // Deprecated - given no typings + pipe(doneFilter?: (x: any) => any, failFilter?: (x: any) => any, progressFilter?: (x: any) => any): JQueryPromise; +} + +/** + * Interface of the JQuery extension of the W3C event object + */ +interface BaseJQueryEventObject extends Event { + data: any; + delegateTarget: Element; + isDefaultPrevented(): boolean; + isImmediatePropagationStopped(): boolean; + isPropagationStopped(): boolean; + namespace: string; + originalEvent: Event; + preventDefault(): any; + relatedTarget: Element; + result: any; + stopImmediatePropagation(): void; + stopPropagation(): void; + target: Element; + pageX: number; + pageY: number; + which: number; + metaKey: boolean; +} + +interface JQueryInputEventObject extends BaseJQueryEventObject { + altKey: boolean; + ctrlKey: boolean; + metaKey: boolean; + shiftKey: boolean; +} + +interface JQueryMouseEventObject extends JQueryInputEventObject { + button: number; + clientX: number; + clientY: number; + offsetX: number; + offsetY: number; + pageX: number; + pageY: number; + screenX: number; + screenY: number; +} + +interface JQueryKeyEventObject extends JQueryInputEventObject { + char: any; + charCode: number; + key: any; + keyCode: number; +} + +interface JQueryEventObject extends BaseJQueryEventObject, JQueryInputEventObject, JQueryMouseEventObject, JQueryKeyEventObject{ +} + +/* + Collection of properties of the current browser +*/ + +interface JQuerySupport { + ajax?: boolean; + boxModel?: boolean; + changeBubbles?: boolean; + checkClone?: boolean; + checkOn?: boolean; + cors?: boolean; + cssFloat?: boolean; + hrefNormalized?: boolean; + htmlSerialize?: boolean; + leadingWhitespace?: boolean; + noCloneChecked?: boolean; + noCloneEvent?: boolean; + opacity?: boolean; + optDisabled?: boolean; + optSelected?: boolean; + scriptEval? (): boolean; + style?: boolean; + submitBubbles?: boolean; + tbody?: boolean; +} + +interface JQueryParam { + /** + * Create a serialized representation of an array or object, suitable for use in a URL query string or Ajax request. + * + * @param obj An array or object to serialize. + */ + (obj: any): string; + + /** + * Create a serialized representation of an array or object, suitable for use in a URL query string or Ajax request. + * + * @param obj An array or object to serialize. + * @param traditional A Boolean indicating whether to perform a traditional "shallow" serialization. + */ + (obj: any, traditional: boolean): string; +} + +/** + * The interface used to construct jQuery events (with $.Event). It is + * defined separately instead of inline in JQueryStatic to allow + * overriding the construction function with specific strings + * returning specific event objects. + */ +interface JQueryEventConstructor { + (name: string, eventProperties?: any): JQueryEventObject; + new (name: string, eventProperties?: any): JQueryEventObject; +} + +/** + * The interface used to specify coordinates. + */ +interface JQueryCoordinates { + left: number; + top: number; +} + +/** + * Elements in the array returned by serializeArray() + */ +interface JQuerySerializeArrayElement { + name: string; + value: string; +} + +interface JQueryAnimationOptions { + /** + * A string or number determining how long the animation will run. + */ + duration?: any; + /** + * A string indicating which easing function to use for the transition. 
+ */ + easing?: string; + /** + * A function to call once the animation is complete. + */ + complete?: Function; + /** + * A function to be called for each animated property of each animated element. This function provides an opportunity to modify the Tween object to change the value of the property before it is set. + */ + step?: (now: number, tween: any) => any; + /** + * A function to be called after each step of the animation, only once per animated element regardless of the number of animated properties. (version added: 1.8) + */ + progress?: (animation: JQueryPromise, progress: number, remainingMs: number) => any; + /** + * A function to call when the animation begins. (version added: 1.8) + */ + start?: (animation: JQueryPromise) => any; + /** + * A function to be called when the animation completes (its Promise object is resolved). (version added: 1.8) + */ + done?: (animation: JQueryPromise, jumpedToEnd: boolean) => any; + /** + * A function to be called when the animation fails to complete (its Promise object is rejected). (version added: 1.8) + */ + fail?: (animation: JQueryPromise, jumpedToEnd: boolean) => any; + /** + * A function to be called when the animation completes or stops without completing (its Promise object is either resolved or rejected). (version added: 1.8) + */ + always?: (animation: JQueryPromise, jumpedToEnd: boolean) => any; + /** + * A Boolean indicating whether to place the animation in the effects queue. If false, the animation will begin immediately. As of jQuery 1.7, the queue option can also accept a string, in which case the animation is added to the queue represented by that string. When a custom queue name is used the animation does not automatically start; you must call .dequeue("queuename") to start it. + */ + queue?: any; + /** + * A map of one or more of the CSS properties defined by the properties argument and their corresponding easing functions. (version added: 1.4) + */ + specialEasing?: Object; +} + +/** + * Static members of jQuery (those on $ and jQuery themselves) + */ +interface JQueryStatic { + + /** + * Perform an asynchronous HTTP (Ajax) request. + * + * @param settings A set of key/value pairs that configure the Ajax request. All settings are optional. A default can be set for any option with $.ajaxSetup(). + */ + ajax(settings: JQueryAjaxSettings): JQueryXHR; + /** + * Perform an asynchronous HTTP (Ajax) request. + * + * @param url A string containing the URL to which the request is sent. + * @param settings A set of key/value pairs that configure the Ajax request. All settings are optional. A default can be set for any option with $.ajaxSetup(). + */ + ajax(url: string, settings?: JQueryAjaxSettings): JQueryXHR; + + /** + * Handle custom Ajax options or modify existing options before each request is sent and before they are processed by $.ajax(). + * + * @param dataTypes An optional string containing one or more space-separated dataTypes + * @param handler A handler to set default values for future Ajax requests. + */ + ajaxPrefilter(dataTypes: string, handler: (opts: any, originalOpts: JQueryAjaxSettings, jqXHR: JQueryXHR) => any): void; + /** + * Handle custom Ajax options or modify existing options before each request is sent and before they are processed by $.ajax(). + * + * @param handler A handler to set default values for future Ajax requests. 
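`JQueryAnimationOptions` is the options object accepted by the standard `.animate()` method (declared elsewhere in this file). An illustrative sketch with a made-up selector:

```ts
const opts: JQueryAnimationOptions = {
  duration: 400,
  easing: 'swing',
  queue: false,                                  // start immediately, skip the fx queue
  complete: function () { console.log('panel fade-in finished'); },
};
$('.panel-container').animate({ opacity: 1 }, opts);
```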
+ */ + ajaxPrefilter(handler: (opts: any, originalOpts: JQueryAjaxSettings, jqXHR: JQueryXHR) => any): void; + + ajaxSettings: JQueryAjaxSettings; + + /** + * Set default values for future Ajax requests. Its use is not recommended. + * + * @param options A set of key/value pairs that configure the default Ajax request. All options are optional. + */ + ajaxSetup(options: JQueryAjaxSettings): void; + + /** + * Load data from the server using a HTTP GET request. + * + * @param url A string containing the URL to which the request is sent. + * @param success A callback function that is executed if the request succeeds. + * @param dataType The type of data expected from the server. Default: Intelligent Guess (xml, json, script, or html). + */ + get(url: string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any, dataType?: string): JQueryXHR; + /** + * Load data from the server using a HTTP GET request. + * + * @param url A string containing the URL to which the request is sent. + * @param data A plain object or string that is sent to the server with the request. + * @param success A callback function that is executed if the request succeeds. + * @param dataType The type of data expected from the server. Default: Intelligent Guess (xml, json, script, or html). + */ + get(url: string, data?: Object|string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any, dataType?: string): JQueryXHR; + /** + * Load JSON-encoded data from the server using a GET HTTP request. + * + * @param url A string containing the URL to which the request is sent. + * @param success A callback function that is executed if the request succeeds. + */ + getJSON(url: string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any): JQueryXHR; + /** + * Load JSON-encoded data from the server using a GET HTTP request. + * + * @param url A string containing the URL to which the request is sent. + * @param data A plain object or string that is sent to the server with the request. + * @param success A callback function that is executed if the request succeeds. + */ + getJSON(url: string, data?: Object|string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any): JQueryXHR; + /** + * Load a JavaScript file from the server using a GET HTTP request, then execute it. + * + * @param url A string containing the URL to which the request is sent. + * @param success A callback function that is executed if the request succeeds. + */ + getScript(url: string, success?: (script: string, textStatus: string, jqXHR: JQueryXHR) => any): JQueryXHR; + + /** + * Create a serialized representation of an array or object, suitable for use in a URL query string or Ajax request. + */ + param: JQueryParam; + + /** + * Load data from the server using a HTTP POST request. + * + * @param url A string containing the URL to which the request is sent. + * @param success A callback function that is executed if the request succeeds. Required if dataType is provided, but can be null in that case. + * @param dataType The type of data expected from the server. Default: Intelligent Guess (xml, json, script, text, html). + */ + post(url: string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any, dataType?: string): JQueryXHR; + /** + * Load data from the server using a HTTP POST request. + * + * @param url A string containing the URL to which the request is sent. + * @param data A plain object or string that is sent to the server with the request. 
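The shortcut helpers above wrap `$.ajax` for the common cases. An illustrative sketch (URLs and payloads are placeholders):

```ts
// GET + parse JSON
$.getJSON('/api/search', { query: 'cpu' }, (hits, textStatus, jqXHR) => {
  console.log(textStatus, hits.length);
});

// POST with an explicit response dataType
$.post('/api/annotations', JSON.stringify({ text: 'deploy' }), (data) => {
  console.log('annotation created', data);
}, 'json');
```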
+ * @param success A callback function that is executed if the request succeeds. Required if dataType is provided, but can be null in that case. + * @param dataType The type of data expected from the server. Default: Intelligent Guess (xml, json, script, text, html). + */ + post(url: string, data?: Object|string, success?: (data: any, textStatus: string, jqXHR: JQueryXHR) => any, dataType?: string): JQueryXHR; + + /** + * A multi-purpose callbacks list object that provides a powerful way to manage callback lists. + * + * @param flags An optional list of space-separated flags that change how the callback list behaves. + */ + Callbacks(flags?: string): JQueryCallback; + + /** + * Holds or releases the execution of jQuery's ready event. + * + * @param hold Indicates whether the ready hold is being requested or released + */ + holdReady(hold: boolean): void; + + /** + * Accepts a string containing a CSS selector which is then used to match a set of elements. + * + * @param selector A string containing a selector expression + * @param context A DOM Element, Document, or jQuery to use as context + */ + (selector: string, context?: Element|JQuery): JQuery; + + /** + * Accepts a string containing a CSS selector which is then used to match a set of elements. + * + * @param element A DOM element to wrap in a jQuery object. + */ + (element: Element): JQuery; + + /** + * Accepts a string containing a CSS selector which is then used to match a set of elements. + * + * @param elementArray An array containing a set of DOM elements to wrap in a jQuery object. + */ + (elementArray: Element[]): JQuery; + + /** + * Binds a function to be executed when the DOM has finished loading. + * + * @param callback A function to execute after the DOM is ready. + */ + (callback: (jQueryAlias?: JQueryStatic) => any): JQuery; + + /** + * Accepts a string containing a CSS selector which is then used to match a set of elements. + * + * @param object A plain object to wrap in a jQuery object. + */ + (object: {}): JQuery; + + /** + * Accepts a string containing a CSS selector which is then used to match a set of elements. + * + * @param object An existing jQuery object to clone. + */ + (object: JQuery): JQuery; + + /** + * Specify a function to execute when the DOM is fully loaded. + */ + (): JQuery; + + /** + * Creates DOM elements on the fly from the provided string of raw HTML. + * + * @param html A string of HTML to create on the fly. Note that this parses HTML, not XML. + * @param ownerDocument A document in which the new elements will be created. + */ + (html: string, ownerDocument?: Document): JQuery; + + /** + * Creates DOM elements on the fly from the provided string of raw HTML. + * + * @param html A string defining a single, standalone, HTML element (e.g.
<div/> or <div></div>
    ). + * @param attributes An object of attributes, events, and methods to call on the newly-created element. + */ + (html: string, attributes: Object): JQuery; + + /** + * Relinquish jQuery's control of the $ variable. + * + * @param removeAll A Boolean indicating whether to remove all jQuery variables from the global scope (including jQuery itself). + */ + noConflict(removeAll?: boolean): Object; + + /** + * Provides a way to execute callback functions based on one or more objects, usually Deferred objects that represent asynchronous events. + * + * @param deferreds One or more Deferred objects, or plain JavaScript objects. + */ + when(...deferreds: Array/* as JQueryDeferred */>): JQueryPromise; + + /** + * Hook directly into jQuery to override how particular CSS properties are retrieved or set, normalize CSS property naming, or create custom properties. + */ + cssHooks: { [key: string]: any; }; + cssNumber: any; + + /** + * Store arbitrary data associated with the specified element. Returns the value that was set. + * + * @param element The DOM element to associate with the data. + * @param key A string naming the piece of data to set. + * @param value The new data value. + */ + data(element: Element, key: string, value: T): T; + /** + * Returns value at named data store for the element, as set by jQuery.data(element, name, value), or the full data store for the element. + * + * @param element The DOM element to associate with the data. + * @param key A string naming the piece of data to set. + */ + data(element: Element, key: string): any; + /** + * Returns value at named data store for the element, as set by jQuery.data(element, name, value), or the full data store for the element. + * + * @param element The DOM element to associate with the data. + */ + data(element: Element): any; + + /** + * Execute the next function on the queue for the matched element. + * + * @param element A DOM element from which to remove and execute a queued function. + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + dequeue(element: Element, queueName?: string): void; + + /** + * Determine whether an element has any jQuery data associated with it. + * + * @param element A DOM element to be checked for data. + */ + hasData(element: Element): boolean; + + /** + * Show the queue of functions to be executed on the matched element. + * + * @param element A DOM element to inspect for an attached queue. + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + queue(element: Element, queueName?: string): any[]; + /** + * Manipulate the queue of functions to be executed on the matched element. + * + * @param element A DOM element where the array of queued functions is attached. + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + * @param newQueue An array of functions to replace the current queue contents. + */ + queue(element: Element, queueName: string, newQueue: Function[]): JQuery; + /** + * Manipulate the queue of functions to be executed on the matched element. + * + * @param element A DOM element on which to add a queued function. + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + * @param callback The new function to add to the queue. + */ + queue(element: Element, queueName: string, callback: Function): JQuery; + + /** + * Remove a previously-stored piece of data. 
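The static data helpers above attach arbitrary values to DOM elements without touching attributes. A small sketch using `document.body` as a stand-in element:

```ts
const el = document.body;

$.data(el, 'refreshInterval', 30);              // store
console.log($.data(el, 'refreshInterval'));     // 30
console.log($.hasData(el));                     // true
console.log($.queue(el, 'fx'));                 // current effects queue (likely empty)
$.removeData(el, 'refreshInterval');            // clean up
```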
+ * + * @param element A DOM element from which to remove data. + * @param name A string naming the piece of data to remove. + */ + removeData(element: Element, name?: string): JQuery; + + /** + * A constructor function that returns a chainable utility object with methods to register multiple callbacks into callback queues, invoke callback queues, and relay the success or failure state of any synchronous or asynchronous function. + * + * @param beforeStart A function that is called just before the constructor returns. + */ + Deferred(beforeStart?: (deferred: JQueryDeferred) => any): JQueryDeferred; + + /** + * Effects + */ + fx: { + tick: () => void; + /** + * The rate (in milliseconds) at which animations fire. + */ + interval: number; + stop: () => void; + speeds: { slow: number; fast: number; }; + /** + * Globally disable all animations. + */ + off: boolean; + step: any; + }; + + /** + * Takes a function and returns a new one that will always have a particular context. + * + * @param fnction The function whose context will be changed. + * @param context The object to which the context (this) of the function should be set. + * @param additionalArguments Any number of arguments to be passed to the function referenced in the function argument. + */ + proxy(fnction: (...args: any[]) => any, context: Object, ...additionalArguments: any[]): any; + /** + * Takes a function and returns a new one that will always have a particular context. + * + * @param context The object to which the context (this) of the function should be set. + * @param name The name of the function whose context will be changed (should be a property of the context object). + * @param additionalArguments Any number of arguments to be passed to the function named in the name argument. + */ + proxy(context: Object, name: string, ...additionalArguments: any[]): any; + + Event: JQueryEventConstructor; + + /** + * Takes a string and throws an exception containing it. + * + * @param message The message to send out. + */ + error(message: any): JQuery; + + expr: any; + fn: any; //TODO: Decide how we want to type this + + isReady: boolean; + + // Properties + support: JQuerySupport; + + /** + * Check to see if a DOM element is a descendant of another DOM element. + * + * @param container The DOM element that may contain the other element. + * @param contained The DOM element that may be contained by (a descendant of) the other element. + */ + contains(container: Element, contained: Element): boolean; + + /** + * A generic iterator function, which can be used to seamlessly iterate over both objects and arrays. Arrays and array-like objects with a length property (such as a function's arguments object) are iterated by numeric index, from 0 to length-1. Other objects are iterated via their named properties. + * + * @param collection The object or array to iterate over. + * @param callback The function that will be executed on every object. + */ + each( + collection: T[], + callback: (indexInArray: number, valueOfElement: T) => any + ): any; + + /** + * A generic iterator function, which can be used to seamlessly iterate over both objects and arrays. Arrays and array-like objects with a length property (such as a function's arguments object) are iterated by numeric index, from 0 to length-1. Other objects are iterated via their named properties. + * + * @param collection The object or array to iterate over. + * @param callback The function that will be executed on every object. 
+ */ + each( + collection: any, + callback: (indexInArray: any, valueOfElement: any) => any + ): any; + + /** + * Merge the contents of two or more objects together into the first object. + * + * @param target An object that will receive the new properties if additional objects are passed in or that will extend the jQuery namespace if it is the sole argument. + * @param object1 An object containing additional properties to merge in. + * @param objectN Additional objects containing properties to merge in. + */ + extend(target: any, object1?: any, ...objectN: any[]): any; + /** + * Merge the contents of two or more objects together into the first object. + * + * @param deep If true, the merge becomes recursive (aka. deep copy). + * @param target The object to extend. It will receive the new properties. + * @param object1 An object containing additional properties to merge in. + * @param objectN Additional objects containing properties to merge in. + */ + extend(deep: boolean, target: any, object1?: any, ...objectN: any[]): any; + + /** + * Execute some JavaScript code globally. + * + * @param code The JavaScript code to execute. + */ + globalEval(code: string): any; + + /** + * Finds the elements of an array which satisfy a filter function. The original array is not affected. + * + * @param array The array to search through. + * @param func The function to process each item against. The first argument to the function is the item, and the second argument is the index. The function should return a Boolean value. this will be the global window object. + * @param invert If "invert" is false, or not provided, then the function returns an array consisting of all elements for which "callback" returns true. If "invert" is true, then the function returns an array consisting of all elements for which "callback" returns false. + */ + grep(array: T[], func: (elementOfArray: T, indexInArray: number) => boolean, invert?: boolean): T[]; + + /** + * Search for a specified value within an array and return its index (or -1 if not found). + * + * @param value The value to search for. + * @param array An array through which to search. + * @param fromIndex he index of the array at which to begin the search. The default is 0, which will search the whole array. + */ + inArray(value: T, array: T[], fromIndex?: number): number; + + /** + * Determine whether the argument is an array. + * + * @param obj Object to test whether or not it is an array. + */ + isArray(obj: any): boolean; + /** + * Check to see if an object is empty (contains no enumerable properties). + * + * @param obj The object that will be checked to see if it's empty. + */ + isEmptyObject(obj: any): boolean; + /** + * Determine if the argument passed is a Javascript function object. + * + * @param obj Object to test whether or not it is a function. + */ + isFunction(obj: any): boolean; + /** + * Determines whether its argument is a number. + * + * @param obj The value to be tested. + */ + isNumeric(value: any): boolean; + /** + * Check to see if an object is a plain object (created using "{}" or "new Object"). + * + * @param obj The object that will be checked to see if it's a plain object. + */ + isPlainObject(obj: any): boolean; + /** + * Determine whether the argument is a window. + * + * @param obj Object to test whether or not it is a window. + */ + isWindow(obj: any): boolean; + /** + * Check to see if a DOM node is within an XML document (or is an XML document). 
+ * + * @param node he DOM node that will be checked to see if it's in an XML document. + */ + isXMLDoc(node: Node): boolean; + + /** + * Convert an array-like object into a true JavaScript array. + * + * @param obj Any object to turn into a native Array. + */ + makeArray(obj: any): any[]; + + /** + * Translate all items in an array or object to new array of items. + * + * @param array The Array to translate. + * @param callback The function to process each item against. The first argument to the function is the array item, the second argument is the index in array The function can return any value. Within the function, this refers to the global (window) object. + */ + map(array: T[], callback: (elementOfArray: T, indexInArray: number) => U): U[]; + /** + * Translate all items in an array or object to new array of items. + * + * @param arrayOrObject The Array or Object to translate. + * @param callback The function to process each item against. The first argument to the function is the value; the second argument is the index or key of the array or object property. The function can return any value to add to the array. A returned array will be flattened into the resulting array. Within the function, this refers to the global (window) object. + */ + map(arrayOrObject: any, callback: (value: any, indexOrKey: any) => any): any; + + /** + * Merge the contents of two arrays together into the first array. + * + * @param first The first array to merge, the elements of second added. + * @param second The second array to merge into the first, unaltered. + */ + merge(first: T[], second: T[]): T[]; + + /** + * An empty function. + */ + noop(): any; + + /** + * Return a number representing the current time. + */ + now(): number; + + /** + * Takes a well-formed JSON string and returns the resulting JavaScript object. + * + * @param json The JSON string to parse. + */ + parseJSON(json: string): any; + + /** + * Parses a string into an XML document. + * + * @param data a well-formed XML string to be parsed + */ + parseXML(data: string): XMLDocument; + + /** + * Remove the whitespace from the beginning and end of a string. + * + * @param str Remove the whitespace from the beginning and end of a string. + */ + trim(str: string): string; + + /** + * Determine the internal JavaScript [[Class]] of an object. + * + * @param obj Object to get the internal JavaScript [[Class]] of. + */ + type(obj: any): string; + + /** + * Sorts an array of DOM elements, in place, with the duplicates removed. Note that this only works on arrays of DOM elements, not strings or numbers. + * + * @param array The Array of DOM elements. + */ + unique(array: Element[]): Element[]; + + /** + * Parses a string into an array of DOM nodes. + * + * @param data HTML string to be parsed + * @param context DOM element to serve as the context in which the HTML fragment will be created + * @param keepScripts A Boolean indicating whether to include scripts passed in the HTML string + */ + parseHTML(data: string, context?: HTMLElement, keepScripts?: boolean): any[]; + + /** + * Parses a string into an array of DOM nodes. 
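A few of the static utilities above, exercised together in a hedged sketch (the sample data is invented):

```ts
const series = [{ name: 'cpu', max: 80 }, { name: 'mem', max: 95 }];

const names = $.map(series, (s) => s.name);             // ['cpu', 'mem']
const alerting = $.grep(series, (s) => s.max > 90);     // [{ name: 'mem', max: 95 }]
const merged = $.extend(true, {}, { a: 1 }, { b: 2 });  // deep copy: { a: 1, b: 2 }

$.each(series, (i, s) => console.log(i, s.name));
console.log($.inArray('cpu', names));                   // 0
console.log($.trim('  now-6h  '));                      // "now-6h"
console.log($.type(merged));                            // "object"
```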
+ * + * @param data HTML string to be parsed + * @param context DOM element to serve as the context in which the HTML fragment will be created + * @param keepScripts A Boolean indicating whether to include scripts passed in the HTML string + */ + parseHTML(data: string, context?: Document, keepScripts?: boolean): any[]; +} + +/** + * The jQuery instance members + */ +interface JQuery { + /** + * Register a handler to be called when Ajax requests complete. This is an AjaxEvent. + * + * @param handler The function to be invoked. + */ + ajaxComplete(handler: (event: JQueryEventObject, XMLHttpRequest: XMLHttpRequest, ajaxOptions: any) => any): JQuery; + /** + * Register a handler to be called when Ajax requests complete with an error. This is an Ajax Event. + * + * @param handler The function to be invoked. + */ + ajaxError(handler: (event: JQueryEventObject, jqXHR: JQueryXHR, ajaxSettings: JQueryAjaxSettings, thrownError: any) => any): JQuery; + /** + * Attach a function to be executed before an Ajax request is sent. This is an Ajax Event. + * + * @param handler The function to be invoked. + */ + ajaxSend(handler: (event: JQueryEventObject, jqXHR: JQueryXHR, ajaxOptions: JQueryAjaxSettings) => any): JQuery; + /** + * Register a handler to be called when the first Ajax request begins. This is an Ajax Event. + * + * @param handler The function to be invoked. + */ + ajaxStart(handler: () => any): JQuery; + /** + * Register a handler to be called when all Ajax requests have completed. This is an Ajax Event. + * + * @param handler The function to be invoked. + */ + ajaxStop(handler: () => any): JQuery; + /** + * Attach a function to be executed whenever an Ajax request completes successfully. This is an Ajax Event. + * + * @param handler The function to be invoked. + */ + ajaxSuccess(handler: (event: JQueryEventObject, XMLHttpRequest: XMLHttpRequest, ajaxOptions: JQueryAjaxSettings) => any): JQuery; + + /** + * Load data from the server and place the returned HTML into the matched element. + * + * @param url A string containing the URL to which the request is sent. + * @param data A plain object or string that is sent to the server with the request. + * @param complete A callback function that is executed when the request completes. + */ + load(url: string, data?: string|Object, complete?: (responseText: string, textStatus: string, XMLHttpRequest: XMLHttpRequest) => any): JQuery; + + /** + * Encode a set of form elements as a string for submission. + */ + serialize(): string; + /** + * Encode a set of form elements as an array of names and values. + */ + serializeArray(): JQuerySerializeArrayElement[]; + + /** + * Adds the specified class(es) to each of the set of matched elements. + * + * @param className One or more space-separated classes to be added to the class attribute of each matched element. + */ + addClass(className: string): JQuery; + /** + * Adds the specified class(es) to each of the set of matched elements. + * + * @param function A function returning one or more space-separated class names to be added to the existing class name(s). Receives the index position of the element in the set and the existing class name(s) as arguments. Within the function, this refers to the current element in the set. + */ + addClass(func: (index: number, className: string) => string): JQuery; + + /** + * Add the previous set of elements on the stack to the current set, optionally filtered by a selector. 
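+ *
+ * For example (an illustrative sketch only; the selectors and class name are placeholders):
+ *
+ *   // Select the paragraphs, add the original divs back onto the stack, and style both sets.
+ *   $('div.section').find('p').addBack().addClass('highlight');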
+ */ + addBack(selector?: string): JQuery; + + /** + * Get the value of an attribute for the first element in the set of matched elements. + * + * @param attributeName The name of the attribute to get. + */ + attr(attributeName: string): string; + /** + * Set one or more attributes for the set of matched elements. + * + * @param attributeName The name of the attribute to set. + * @param value A value to set for the attribute. + */ + attr(attributeName: string, value: string|number): JQuery; + /** + * Set one or more attributes for the set of matched elements. + * + * @param attributeName The name of the attribute to set. + * @param func A function returning the value to set. this is the current element. Receives the index position of the element in the set and the old attribute value as arguments. + */ + attr(attributeName: string, func: (index: number, attr: string) => string|number): JQuery; + /** + * Set one or more attributes for the set of matched elements. + * + * @param attributes An object of attribute-value pairs to set. + */ + attr(attributes: Object): JQuery; + + /** + * Determine whether any of the matched elements are assigned the given class. + * + * @param className The class name to search for. + */ + hasClass(className: string): boolean; + + /** + * Get the HTML contents of the first element in the set of matched elements. + */ + html(): string; + /** + * Set the HTML contents of each element in the set of matched elements. + * + * @param htmlString A string of HTML to set as the content of each matched element. + */ + html(htmlString: string): JQuery; + /** + * Set the HTML contents of each element in the set of matched elements. + * + * @param func A function returning the HTML content to set. Receives the index position of the element in the set and the old HTML value as arguments. jQuery empties the element before calling the function; use the oldhtml argument to reference the previous content. Within the function, this refers to the current element in the set. + */ + html(func: (index: number, oldhtml: string) => string): JQuery; + /** + * Set the HTML contents of each element in the set of matched elements. + * + * @param func A function returning the HTML content to set. Receives the index position of the element in the set and the old HTML value as arguments. jQuery empties the element before calling the function; use the oldhtml argument to reference the previous content. Within the function, this refers to the current element in the set. + */ + + /** + * Get the value of a property for the first element in the set of matched elements. + * + * @param propertyName The name of the property to get. + */ + prop(propertyName: string): any; + /** + * Set one or more properties for the set of matched elements. + * + * @param propertyName The name of the property to set. + * @param value A value to set for the property. + */ + prop(propertyName: string, value: string|number|boolean): JQuery; + /** + * Set one or more properties for the set of matched elements. + * + * @param properties An object of property-value pairs to set. + */ + prop(properties: Object): JQuery; + /** + * Set one or more properties for the set of matched elements. + * + * @param propertyName The name of the property to set. + * @param func A function returning the value to set. Receives the index position of the element in the set and the old property value as arguments. Within the function, the keyword this refers to the current element. 
+ */ + prop(propertyName: string, func: (index: number, oldPropertyValue: any) => any): JQuery; + + /** + * Remove an attribute from each element in the set of matched elements. + * + * @param attributeName An attribute to remove; as of version 1.7, it can be a space-separated list of attributes. + */ + removeAttr(attributeName: string): JQuery; + + /** + * Remove a single class, multiple classes, or all classes from each element in the set of matched elements. + * + * @param className One or more space-separated classes to be removed from the class attribute of each matched element. + */ + removeClass(className?: string): JQuery; + /** + * Remove a single class, multiple classes, or all classes from each element in the set of matched elements. + * + * @param function A function returning one or more space-separated class names to be removed. Receives the index position of the element in the set and the old class value as arguments. + */ + removeClass(func: (index: number, className: string) => string): JQuery; + + /** + * Remove a property for the set of matched elements. + * + * @param propertyName The name of the property to remove. + */ + removeProp(propertyName: string): JQuery; + + /** + * Add or remove one or more classes from each element in the set of matched elements, depending on either the class's presence or the value of the switch argument. + * + * @param className One or more class names (separated by spaces) to be toggled for each element in the matched set. + * @param swtch A Boolean (not just truthy/falsy) value to determine whether the class should be added or removed. + */ + toggleClass(className: string, swtch?: boolean): JQuery; + /** + * Add or remove one or more classes from each element in the set of matched elements, depending on either the class's presence or the value of the switch argument. + * + * @param swtch A boolean value to determine whether the class should be added or removed. + */ + toggleClass(swtch?: boolean): JQuery; + /** + * Add or remove one or more classes from each element in the set of matched elements, depending on either the class's presence or the value of the switch argument. + * + * @param func A function that returns class names to be toggled in the class attribute of each element in the matched set. Receives the index position of the element in the set, the old class value, and the switch as arguments. + * @param swtch A boolean value to determine whether the class should be added or removed. + */ + toggleClass(func: (index: number, className: string, swtch: boolean) => string, swtch?: boolean): JQuery; + + /** + * Get the current value of the first element in the set of matched elements. + */ + val(): any; + /** + * Set the value of each element in the set of matched elements. + * + * @param value A string of text or an array of strings corresponding to the value of each matched element to set as selected/checked. + */ + val(value: string|string[]): JQuery; + /** + * Set the value of each element in the set of matched elements. + * + * @param func A function returning the value to set. this is the current element. Receives the index position of the element in the set and the old value as arguments. + */ + val(func: (index: number, value: string) => string): JQuery; + + + /** + * Get the value of style properties for the first element in the set of matched elements. + * + * @param propertyName A CSS property. + */ + css(propertyName: string): string; + /** + * Set one or more CSS properties for the set of matched elements. 
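+ *
+ * For example (an illustrative sketch only; selectors and values are placeholders):
+ *
+ *   $('.legend').css('color', '#d8d9da');          // setter overload
+ *   var height = $('.graph-panel').css('height');  // getter overload returns a string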
+ * + * @param propertyName A CSS property name. + * @param value A value to set for the property. + */ + css(propertyName: string, value: string|number): JQuery; + /** + * Set one or more CSS properties for the set of matched elements. + * + * @param propertyName A CSS property name. + * @param value A function returning the value to set. this is the current element. Receives the index position of the element in the set and the old value as arguments. + */ + css(propertyName: string, value: (index: number, value: string) => string|number): JQuery; + /** + * Set one or more CSS properties for the set of matched elements. + * + * @param properties An object of property-value pairs to set. + */ + css(properties: Object): JQuery; + + /** + * Get the current computed height for the first element in the set of matched elements. + */ + height(): number; + /** + * Set the CSS height of every matched element. + * + * @param value An integer representing the number of pixels, or an integer with an optional unit of measure appended (as a string). + */ + height(value: number|string): JQuery; + /** + * Set the CSS height of every matched element. + * + * @param func A function returning the height to set. Receives the index position of the element in the set and the old height as arguments. Within the function, this refers to the current element in the set. + */ + height(func: (index: number, height: number) => number|string): JQuery; + + /** + * Get the current computed height for the first element in the set of matched elements, including padding but not border. + */ + innerHeight(): number; + + /** + * Sets the inner height on elements in the set of matched elements, including padding but not border. + * + * @param value An integer representing the number of pixels, or an integer along with an optional unit of measure appended (as a string). + */ + innerHeight(height: number|string): JQuery; + + /** + * Get the current computed width for the first element in the set of matched elements, including padding but not border. + */ + innerWidth(): number; + + /** + * Sets the inner width on elements in the set of matched elements, including padding but not border. + * + * @param value An integer representing the number of pixels, or an integer along with an optional unit of measure appended (as a string). + */ + innerWidth(width: number|string): JQuery; + + /** + * Get the current coordinates of the first element in the set of matched elements, relative to the document. + */ + offset(): JQueryCoordinates; + /** + * An object containing the properties top and left, which are integers indicating the new top and left coordinates for the elements. + * + * @param coordinates An object containing the properties top and left, which are integers indicating the new top and left coordinates for the elements. + */ + offset(coordinates: JQueryCoordinates): JQuery; + /** + * An object containing the properties top and left, which are integers indicating the new top and left coordinates for the elements. + * + * @param func A function to return the coordinates to set. Receives the index of the element in the collection as the first argument and the current coordinates as the second argument. The function should return an object with the new top and left properties. + */ + offset(func: (index: number, coords: JQueryCoordinates) => JQueryCoordinates): JQuery; + + /** + * Get the current computed height for the first element in the set of matched elements, including padding, border, and optionally margin. 
Returns an integer (without "px") representation of the value or null if called on an empty set of elements. + * + * @param includeMargin A Boolean indicating whether to include the element's margin in the calculation. + */ + outerHeight(includeMargin?: boolean): number; + + /** + * Sets the outer height on elements in the set of matched elements, including padding and border. + * + * @param value An integer representing the number of pixels, or an integer along with an optional unit of measure appended (as a string). + */ + outerHeight(height: number|string): JQuery; + + /** + * Get the current computed width for the first element in the set of matched elements, including padding and border. + * + * @param includeMargin A Boolean indicating whether to include the element's margin in the calculation. + */ + outerWidth(includeMargin?: boolean): number; + + /** + * Sets the outer width on elements in the set of matched elements, including padding and border. + * + * @param value An integer representing the number of pixels, or an integer along with an optional unit of measure appended (as a string). + */ + outerWidth(width: number|string): JQuery; + + /** + * Get the current coordinates of the first element in the set of matched elements, relative to the offset parent. + */ + position(): JQueryCoordinates; + + /** + * Get the current horizontal position of the scroll bar for the first element in the set of matched elements or set the horizontal position of the scroll bar for every matched element. + */ + scrollLeft(): number; + /** + * Set the current horizontal position of the scroll bar for each of the set of matched elements. + * + * @param value An integer indicating the new position to set the scroll bar to. + */ + scrollLeft(value: number): JQuery; + + /** + * Get the current vertical position of the scroll bar for the first element in the set of matched elements or set the vertical position of the scroll bar for every matched element. + */ + scrollTop(): number; + /** + * Set the current vertical position of the scroll bar for each of the set of matched elements. + * + * @param value An integer indicating the new position to set the scroll bar to. + */ + scrollTop(value: number): JQuery; + + /** + * Get the current computed width for the first element in the set of matched elements. + */ + width(): number; + /** + * Set the CSS width of each element in the set of matched elements. + * + * @param value An integer representing the number of pixels, or an integer along with an optional unit of measure appended (as a string). + */ + width(value: number|string): JQuery; + /** + * Set the CSS width of each element in the set of matched elements. + * + * @param func A function returning the width to set. Receives the index position of the element in the set and the old width as arguments. Within the function, this refers to the current element in the set. + */ + width(func: (index: number, width: number) => number|string): JQuery; + + /** + * Remove from the queue all items that have not yet been run. + * + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + clearQueue(queueName?: string): JQuery; + + /** + * Store arbitrary data associated with the matched elements. + * + * @param key A string naming the piece of data to set. + * @param value The new data value; it can be any Javascript type including Array or Object. 
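+ *
+ * For example (an illustrative sketch only; the selector and key are placeholders):
+ *
+ *   $('#graph').data('panelId', 5);
+ *   var id = $('#graph').data('panelId'); // 5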
+ */ + data(key: string, value: any): JQuery; + /** + * Return the value at the named data store for the first element in the jQuery collection, as set by data(name, value) or by an HTML5 data-* attribute. + * + * @param key Name of the data stored. + */ + data(key: string): any; + /** + * Store arbitrary data associated with the matched elements. + * + * @param obj An object of key-value pairs of data to update. + */ + data(obj: { [key: string]: any; }): JQuery; + /** + * Return the value at the named data store for the first element in the jQuery collection, as set by data(name, value) or by an HTML5 data-* attribute. + */ + data(): any; + + /** + * Execute the next function on the queue for the matched elements. + * + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + dequeue(queueName?: string): JQuery; + + /** + * Remove a previously-stored piece of data. + * + * @param name A string naming the piece of data to delete or space-separated string naming the pieces of data to delete. + */ + removeData(name: string): JQuery; + /** + * Remove a previously-stored piece of data. + * + * @param list An array of strings naming the pieces of data to delete. + */ + removeData(list: string[]): JQuery; + + /** + * Return a Promise object to observe when all actions of a certain type bound to the collection, queued or not, have finished. + * + * @param type The type of queue that needs to be observed. (default: fx) + * @param target Object onto which the promise methods have to be attached + */ + promise(type?: string, target?: Object): JQueryPromise; + + /** + * Perform a custom animation of a set of CSS properties. + * + * @param properties An object of CSS properties and values that the animation will move toward. + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + animate(properties: Object, duration?: string|number, complete?: Function): JQuery; + /** + * Perform a custom animation of a set of CSS properties. + * + * @param properties An object of CSS properties and values that the animation will move toward. + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. (default: swing) + * @param complete A function to call once the animation is complete. + */ + animate(properties: Object, duration?: string|number, easing?: string, complete?: Function): JQuery; + /** + * Perform a custom animation of a set of CSS properties. + * + * @param properties An object of CSS properties and values that the animation will move toward. + * @param options A map of additional options to pass to the method. + */ + animate(properties: Object, options: JQueryAnimationOptions): JQuery; + + /** + * Set a timer to delay execution of subsequent items in the queue. + * + * @param duration An integer indicating the number of milliseconds to delay execution of the next item in the queue. + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + delay(duration: number, queueName?: string): JQuery; + + /** + * Display the matched elements by fading them to opaque. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. 
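+ *
+ * For example (an illustrative sketch only; the selector and duration are placeholders):
+ *
+ *   $('.tooltip').fadeIn(150, function () {
+ *     // runs once the element is fully opaque
+ *   });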
+ */ + fadeIn(duration?: number|string, complete?: Function): JQuery; + /** + * Display the matched elements by fading them to opaque. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + fadeIn(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display the matched elements by fading them to opaque. + * + * @param options A map of additional options to pass to the method. + */ + fadeIn(options: JQueryAnimationOptions): JQuery; + + /** + * Hide the matched elements by fading them to transparent. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + fadeOut(duration?: number|string, complete?: Function): JQuery; + /** + * Hide the matched elements by fading them to transparent. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + fadeOut(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Hide the matched elements by fading them to transparent. + * + * @param options A map of additional options to pass to the method. + */ + fadeOut(options: JQueryAnimationOptions): JQuery; + + /** + * Adjust the opacity of the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param opacity A number between 0 and 1 denoting the target opacity. + * @param complete A function to call once the animation is complete. + */ + fadeTo(duration: string|number, opacity: number, complete?: Function): JQuery; + /** + * Adjust the opacity of the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param opacity A number between 0 and 1 denoting the target opacity. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + fadeTo(duration: string|number, opacity: number, easing?: string, complete?: Function): JQuery; + + /** + * Display or hide the matched elements by animating their opacity. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + fadeToggle(duration?: number|string, complete?: Function): JQuery; + /** + * Display or hide the matched elements by animating their opacity. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + fadeToggle(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display or hide the matched elements by animating their opacity. + * + * @param options A map of additional options to pass to the method. + */ + fadeToggle(options: JQueryAnimationOptions): JQuery; + + /** + * Stop the currently-running animation, remove all queued animations, and complete all animations for the matched elements. + * + * @param queue The name of the queue in which to stop animations. 
+ */ + finish(queue?: string): JQuery; + + /** + * Hide the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + hide(duration?: number|string, complete?: Function): JQuery; + /** + * Hide the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + hide(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Hide the matched elements. + * + * @param options A map of additional options to pass to the method. + */ + hide(options: JQueryAnimationOptions): JQuery; + + /** + * Display the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + show(duration?: number|string, complete?: Function): JQuery; + /** + * Display the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + show(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display the matched elements. + * + * @param options A map of additional options to pass to the method. + */ + show(options: JQueryAnimationOptions): JQuery; + + /** + * Display the matched elements with a sliding motion. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + slideDown(duration?: number|string, complete?: Function): JQuery; + /** + * Display the matched elements with a sliding motion. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + slideDown(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display the matched elements with a sliding motion. + * + * @param options A map of additional options to pass to the method. + */ + slideDown(options: JQueryAnimationOptions): JQuery; + + /** + * Display or hide the matched elements with a sliding motion. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + slideToggle(duration?: number|string, complete?: Function): JQuery; + /** + * Display or hide the matched elements with a sliding motion. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + slideToggle(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display or hide the matched elements with a sliding motion. + * + * @param options A map of additional options to pass to the method. + */ + slideToggle(options: JQueryAnimationOptions): JQuery; + + /** + * Hide the matched elements with a sliding motion. 
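+ *
+ * For example (an illustrative sketch only; the selector is a placeholder):
+ *
+ *   $('.row-controls').slideUp(200);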
+ * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + slideUp(duration?: number|string, complete?: Function): JQuery; + /** + * Hide the matched elements with a sliding motion. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + slideUp(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Hide the matched elements with a sliding motion. + * + * @param options A map of additional options to pass to the method. + */ + slideUp(options: JQueryAnimationOptions): JQuery; + + /** + * Stop the currently-running animation on the matched elements. + * + * @param clearQueue A Boolean indicating whether to remove queued animation as well. Defaults to false. + * @param jumpToEnd A Boolean indicating whether to complete the current animation immediately. Defaults to false. + */ + stop(clearQueue?: boolean, jumpToEnd?: boolean): JQuery; + /** + * Stop the currently-running animation on the matched elements. + * + * @param queue The name of the queue in which to stop animations. + * @param clearQueue A Boolean indicating whether to remove queued animation as well. Defaults to false. + * @param jumpToEnd A Boolean indicating whether to complete the current animation immediately. Defaults to false. + */ + stop(queue?: string, clearQueue?: boolean, jumpToEnd?: boolean): JQuery; + + /** + * Display or hide the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param complete A function to call once the animation is complete. + */ + toggle(duration?: number|string, complete?: Function): JQuery; + /** + * Display or hide the matched elements. + * + * @param duration A string or number determining how long the animation will run. + * @param easing A string indicating which easing function to use for the transition. + * @param complete A function to call once the animation is complete. + */ + toggle(duration?: number|string, easing?: string, complete?: Function): JQuery; + /** + * Display or hide the matched elements. + * + * @param options A map of additional options to pass to the method. + */ + toggle(options: JQueryAnimationOptions): JQuery; + /** + * Display or hide the matched elements. + * + * @param showOrHide A Boolean indicating whether to show or hide the elements. + */ + toggle(showOrHide: boolean): JQuery; + + /** + * Attach a handler to an event for the elements. + * + * @param eventType A string containing one or more DOM event types, such as "click" or "submit," or custom event names. + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + bind(eventType: string, eventData: any, handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Attach a handler to an event for the elements. + * + * @param eventType A string containing one or more DOM event types, such as "click" or "submit," or custom event names. + * @param handler A function to execute each time the event is triggered. + */ + bind(eventType: string, handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Attach a handler to an event for the elements. 
+ * + * @param eventType A string containing one or more DOM event types, such as "click" or "submit," or custom event names. + * @param eventData An object containing data that will be passed to the event handler. + * @param preventBubble Setting the third argument to false will attach a function that prevents the default action from occurring and stops the event from bubbling. The default is true. + */ + bind(eventType: string, eventData: any, preventBubble: boolean): JQuery; + /** + * Attach a handler to an event for the elements. + * + * @param eventType A string containing one or more DOM event types, such as "click" or "submit," or custom event names. + * @param preventBubble Setting the third argument to false will attach a function that prevents the default action from occurring and stops the event from bubbling. The default is true. + */ + bind(eventType: string, preventBubble: boolean): JQuery; + /** + * Attach a handler to an event for the elements. + * + * @param events An object containing one or more DOM event types and functions to execute for them. + */ + bind(events: any): JQuery; + + /** + * Trigger the "blur" event on an element + */ + blur(): JQuery; + /** + * Bind an event handler to the "blur" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + blur(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "blur" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + blur(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "change" event on an element. + */ + change(): JQuery; + /** + * Bind an event handler to the "change" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + change(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "change" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + change(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "click" event on an element. + */ + click(): JQuery; + /** + * Bind an event handler to the "click" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + */ + click(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "click" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + click(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "dblclick" event on an element. + */ + dblclick(): JQuery; + /** + * Bind an event handler to the "dblclick" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + dblclick(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "dblclick" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. 
+ */ + dblclick(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + delegate(selector: any, eventType: string, handler: (eventObject: JQueryEventObject) => any): JQuery; + delegate(selector: any, eventType: string, eventData: any, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "focus" event on an element. + */ + focus(): JQuery; + /** + * Bind an event handler to the "focus" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + focus(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "focus" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + focus(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Bind an event handler to the "focusin" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + focusin(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "focusin" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + focusin(eventData: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Bind an event handler to the "focusout" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + focusout(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "focusout" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + focusout(eventData: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Bind two handlers to the matched elements, to be executed when the mouse pointer enters and leaves the elements. + * + * @param handlerIn A function to execute when the mouse pointer enters the element. + * @param handlerOut A function to execute when the mouse pointer leaves the element. + */ + hover(handlerIn: (eventObject: JQueryEventObject) => any, handlerOut: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind a single handler to the matched elements, to be executed when the mouse pointer enters or leaves the elements. + * + * @param handlerInOut A function to execute when the mouse pointer enters or leaves the element. + */ + hover(handlerInOut: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "keydown" event on an element. + */ + keydown(): JQuery; + /** + * Bind an event handler to the "keydown" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + keydown(handler: (eventObject: JQueryKeyEventObject) => any): JQuery; + /** + * Bind an event handler to the "keydown" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + keydown(eventData?: any, handler?: (eventObject: JQueryKeyEventObject) => any): JQuery; + + /** + * Trigger the "keypress" event on an element. 
+ */ + keypress(): JQuery; + /** + * Bind an event handler to the "keypress" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + keypress(handler: (eventObject: JQueryKeyEventObject) => any): JQuery; + /** + * Bind an event handler to the "keypress" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + keypress(eventData?: any, handler?: (eventObject: JQueryKeyEventObject) => any): JQuery; + + /** + * Trigger the "keyup" event on an element. + */ + keyup(): JQuery; + /** + * Bind an event handler to the "keyup" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + keyup(handler: (eventObject: JQueryKeyEventObject) => any): JQuery; + /** + * Bind an event handler to the "keyup" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + keyup(eventData?: any, handler?: (eventObject: JQueryKeyEventObject) => any): JQuery; + + /** + * Bind an event handler to the "load" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + load(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "load" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + load(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "mousedown" event on an element. + */ + mousedown(): JQuery; + /** + * Bind an event handler to the "mousedown" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + mousedown(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to the "mousedown" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mousedown(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mouseenter" event on an element. + */ + mouseenter(): JQuery; + /** + * Bind an event handler to be fired when the mouse enters an element. + * + * @param handler A function to execute when the event is triggered. + */ + mouseenter(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to be fired when the mouse enters an element. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mouseenter(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mouseleave" event on an element. + */ + mouseleave(): JQuery; + /** + * Bind an event handler to be fired when the mouse leaves an element. + * + * @param handler A function to execute when the event is triggered. + */ + mouseleave(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to be fired when the mouse leaves an element. + * + * @param eventData An object containing data that will be passed to the event handler. 
+ * @param handler A function to execute when the event is triggered. + */ + mouseleave(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mousemove" event on an element. + */ + mousemove(): JQuery; + /** + * Bind an event handler to the "mousemove" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + mousemove(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to the "mousemove" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mousemove(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mouseout" event on an element. + */ + mouseout(): JQuery; + /** + * Bind an event handler to the "mouseout" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + mouseout(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to the "mouseout" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mouseout(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mouseover" event on an element. + */ + mouseover(): JQuery; + /** + * Bind an event handler to the "mouseover" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + mouseover(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to the "mouseover" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mouseover(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Trigger the "mouseup" event on an element. + */ + mouseup(): JQuery; + /** + * Bind an event handler to the "mouseup" JavaScript event. + * + * @param handler A function to execute when the event is triggered. + */ + mouseup(handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + /** + * Bind an event handler to the "mouseup" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + mouseup(eventData: Object, handler: (eventObject: JQueryMouseEventObject) => any): JQuery; + + /** + * Remove an event handler. + */ + off(): JQuery; + /** + * Remove an event handler. + * + * @param events One or more space-separated event types and optional namespaces, or just namespaces, such as "click", "keydown.myPlugin", or ".myPlugin". + * @param selector A selector which should match the one originally passed to .on() when attaching event handlers. + * @param handler A handler function previously attached for the event(s), or the special value false. + */ + off(events: string, selector?: string, handler?: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Remove an event handler. + * + * @param events One or more space-separated event types and optional namespaces, or just namespaces, such as "click", "keydown.myPlugin", or ".myPlugin". 
+ * @param handler A handler function previously attached for the event(s), or the special value false. + */ + off(events: string, handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Remove an event handler. + * + * @param events An object where the string keys represent one or more space-separated event types and optional namespaces, and the values represent handler functions previously attached for the event(s). + * @param selector A selector which should match the one originally passed to .on() when attaching event handlers. + */ + off(events: { [key: string]: any; }, selector?: string): JQuery; + + /** + * Attach an event handler function for one or more events to the selected elements. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. Rest parameter args is for optional parameters passed to jQuery.trigger(). Note that the actual parameters on the event handler function must be marked as optional (? syntax). + */ + on(events: string, handler: (eventObject: JQueryEventObject, ...args: any[]) => any): JQuery; + /** + * Attach an event handler function for one or more events to the selected elements. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param data Data to be passed to the handler in event.data when an event is triggered. + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. + */ + on(events: string, data : any, handler: (eventObject: JQueryEventObject, ...args: any[]) => any): JQuery; + /** + * Attach an event handler function for one or more events to the selected elements. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param selector A selector string to filter the descendants of the selected elements that trigger the event. If the selector is null or omitted, the event is always triggered when it reaches the selected element. + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. + */ + on(events: string, selector: string, handler: (eventObject: JQueryEventObject, ...eventData: any[]) => any): JQuery; + /** + * Attach an event handler function for one or more events to the selected elements. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param selector A selector string to filter the descendants of the selected elements that trigger the event. If the selector is null or omitted, the event is always triggered when it reaches the selected element. + * @param data Data to be passed to the handler in event.data when an event is triggered. + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. + */ + on(events: string, selector: string, data: any, handler: (eventObject: JQueryEventObject, ...eventData: any[]) => any): JQuery; + /** + * Attach an event handler function for one or more events to the selected elements. 
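+ *
+ * For example (an illustrative sketch only; selectors and handlers are placeholders):
+ *
+ *   // Delegated handlers: one listener on the container reacts to events on its children.
+ *   $('#dashboard').on({
+ *     click: function () { console.log('clicked'); },
+ *     mouseleave: function () { console.log('left'); }
+ *   }, '.panel-title');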
+ * + * @param events An object in which the string keys represent one or more space-separated event types and optional namespaces, and the values represent a handler function to be called for the event(s). + * @param selector A selector string to filter the descendants of the selected elements that will call the handler. If the selector is null or omitted, the handler is always called when it reaches the selected element. + * @param data Data to be passed to the handler in event.data when an event occurs. + */ + on(events: { [key: string]: any; }, selector?: string, data?: any): JQuery; + /** + * Attach an event handler function for one or more events to the selected elements. + * + * @param events An object in which the string keys represent one or more space-separated event types and optional namespaces, and the values represent a handler function to be called for the event(s). + * @param data Data to be passed to the handler in event.data when an event occurs. + */ + on(events: { [key: string]: any; }, data?: any): JQuery; + + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events A string containing one or more JavaScript event types, such as "click" or "submit," or custom event names. + * @param handler A function to execute at the time the event is triggered. + */ + one(events: string, handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events A string containing one or more JavaScript event types, such as "click" or "submit," or custom event names. + * @param data An object containing data that will be passed to the event handler. + * @param handler A function to execute at the time the event is triggered. + */ + one(events: string, data: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param selector A selector string to filter the descendants of the selected elements that trigger the event. If the selector is null or omitted, the event is always triggered when it reaches the selected element. + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. + */ + one(events: string, selector: string, handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events One or more space-separated event types and optional namespaces, such as "click" or "keydown.myPlugin". + * @param selector A selector string to filter the descendants of the selected elements that trigger the event. If the selector is null or omitted, the event is always triggered when it reaches the selected element. + * @param data Data to be passed to the handler in event.data when an event is triggered. + * @param handler A function to execute when the event is triggered. The value false is also allowed as a shorthand for a function that simply does return false. 
+ */ + one(events: string, selector: string, data: any, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events An object in which the string keys represent one or more space-separated event types and optional namespaces, and the values represent a handler function to be called for the event(s). + * @param selector A selector string to filter the descendants of the selected elements that will call the handler. If the selector is null or omitted, the handler is always called when it reaches the selected element. + * @param data Data to be passed to the handler in event.data when an event occurs. + */ + one(events: { [key: string]: any; }, selector?: string, data?: any): JQuery; + + /** + * Attach a handler to an event for the elements. The handler is executed at most once per element per event type. + * + * @param events An object in which the string keys represent one or more space-separated event types and optional namespaces, and the values represent a handler function to be called for the event(s). + * @param data Data to be passed to the handler in event.data when an event occurs. + */ + one(events: { [key: string]: any; }, data?: any): JQuery; + + + /** + * Specify a function to execute when the DOM is fully loaded. + * + * @param handler A function to execute after the DOM is ready. + */ + ready(handler: (jQueryAlias?: JQueryStatic) => any): JQuery; + + /** + * Trigger the "resize" event on an element. + */ + resize(): JQuery; + /** + * Bind an event handler to the "resize" JavaScript event. + * + * @param handler A function to execute each time the event is triggered. + */ + resize(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "resize" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + resize(eventData: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "scroll" event on an element. + */ + scroll(): JQuery; + /** + * Bind an event handler to the "scroll" JavaScript event. + * + * @param handler A function to execute each time the event is triggered. + */ + scroll(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "scroll" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + scroll(eventData: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "select" event on an element. + */ + select(): JQuery; + /** + * Bind an event handler to the "select" JavaScript event. + * + * @param handler A function to execute each time the event is triggered. + */ + select(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "select" JavaScript event. + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + select(eventData: Object, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Trigger the "submit" event on an element. 
+ */ + submit(): JQuery; + /** + * Bind an event handler to the "submit" JavaScript event + * + * @param handler A function to execute each time the event is triggered. + */ + submit(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "submit" JavaScript event + * + * @param eventData An object containing data that will be passed to the event handler. + * @param handler A function to execute each time the event is triggered. + */ + submit(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Execute all handlers and behaviors attached to the matched elements for the given event type. + * + * @param eventType A string containing a JavaScript event type, such as click or submit. + * @param extraParameters Additional parameters to pass along to the event handler. + */ + trigger(eventType: string, extraParameters?: any[]|Object): JQuery; + /** + * Execute all handlers and behaviors attached to the matched elements for the given event type. + * + * @param event A jQuery.Event object. + * @param extraParameters Additional parameters to pass along to the event handler. + */ + trigger(event: JQueryEventObject, extraParameters?: any[]|Object): JQuery; + + /** + * Execute all handlers attached to an element for an event. + * + * @param eventType A string containing a JavaScript event type, such as click or submit. + * @param extraParameters An array of additional parameters to pass along to the event handler. + */ + triggerHandler(eventType: string, ...extraParameters: any[]): Object; + + /** + * Execute all handlers attached to an element for an event. + * + * @param event A jQuery.Event object. + * @param extraParameters An array of additional parameters to pass along to the event handler. + */ + triggerHandler(event: JQueryEventObject, ...extraParameters: any[]): Object; + + /** + * Remove a previously-attached event handler from the elements. + * + * @param eventType A string containing a JavaScript event type, such as click or submit. + * @param handler The function that is to be no longer executed. + */ + unbind(eventType?: string, handler?: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Remove a previously-attached event handler from the elements. + * + * @param eventType A string containing a JavaScript event type, such as click or submit. + * @param fls Unbinds the corresponding 'return false' function that was bound using .bind( eventType, false ). + */ + unbind(eventType: string, fls: boolean): JQuery; + /** + * Remove a previously-attached event handler from the elements. + * + * @param evt A JavaScript event object as passed to an event handler. + */ + unbind(evt: any): JQuery; + + /** + * Remove a handler from the event for all elements which match the current selector, based upon a specific set of root elements. + */ + undelegate(): JQuery; + /** + * Remove a handler from the event for all elements which match the current selector, based upon a specific set of root elements. + * + * @param selector A selector which will be used to filter the event results. + * @param eventType A string containing a JavaScript event type, such as "click" or "keydown" + * @param handler A function to execute at the time the event is triggered. + */ + undelegate(selector: string, eventType: string, handler?: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Remove a handler from the event for all elements which match the current selector, based upon a specific set of root elements. 
+ * + * @param selector A selector which will be used to filter the event results. + * @param events An object of one or more event types and previously bound functions to unbind from them. + */ + undelegate(selector: string, events: Object): JQuery; + /** + * Remove a handler from the event for all elements which match the current selector, based upon a specific set of root elements. + * + * @param namespace A string containing a namespace to unbind all events from. + */ + undelegate(namespace: string): JQuery; + + /** + * Bind an event handler to the "unload" JavaScript event. (DEPRECATED from v1.8) + * + * @param handler A function to execute when the event is triggered. + */ + unload(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "unload" JavaScript event. (DEPRECATED from v1.8) + * + * @param eventData A plain object of data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + unload(eventData?: any, handler?: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * The DOM node context originally passed to jQuery(); if none was passed then context will likely be the document. (DEPRECATED from v1.10) + */ + context: Element; + + jquery: string; + + /** + * Bind an event handler to the "error" JavaScript event. (DEPRECATED from v1.8) + * + * @param handler A function to execute when the event is triggered. + */ + error(handler: (eventObject: JQueryEventObject) => any): JQuery; + /** + * Bind an event handler to the "error" JavaScript event. (DEPRECATED from v1.8) + * + * @param eventData A plain object of data that will be passed to the event handler. + * @param handler A function to execute when the event is triggered. + */ + error(eventData: any, handler: (eventObject: JQueryEventObject) => any): JQuery; + + /** + * Add a collection of DOM elements onto the jQuery stack. + * + * @param elements An array of elements to push onto the stack and make into a new jQuery object. + */ + pushStack(elements: any[]): JQuery; + /** + * Add a collection of DOM elements onto the jQuery stack. + * + * @param elements An array of elements to push onto the stack and make into a new jQuery object. + * @param name The name of a jQuery method that generated the array of elements. + * @param arguments The arguments that were passed in to the jQuery method (for serialization). + */ + pushStack(elements: any[], name: string, arguments: any[]): JQuery; + + /** + * Insert content, specified by the parameter, after each element in the set of matched elements. + * + * param content1 HTML string, DOM element, array of elements, or jQuery object to insert after each element in the set of matched elements. + * param content2 One or more additional DOM elements, arrays of elements, HTML strings, or jQuery objects to insert after each element in the set of matched elements. + */ + after(content1: JQuery|any[]|Element|Text|string, ...content2: any[]): JQuery; + /** + * Insert content, specified by the parameter, after each element in the set of matched elements. + * + * param func A function that returns an HTML string, DOM element(s), or jQuery object to insert after each element in the set of matched elements. Receives the index position of the element in the set as an argument. Within the function, this refers to the current element in the set. 
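+ *
+ * Illustrative usage sketch, not from the upstream docs (hypothetical "li" items):
+ *
+ *     $('li').after(function (index) {
+ *       return '<li class="gap">inserted after item ' + index + '</li>';
+ *     });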
+ */ + after(func: (index: number, html: string) => string|Element|JQuery): JQuery; + + /** + * Insert content, specified by the parameter, to the end of each element in the set of matched elements. + * + * param content1 DOM element, array of elements, HTML string, or jQuery object to insert at the end of each element in the set of matched elements. + * param content2 One or more additional DOM elements, arrays of elements, HTML strings, or jQuery objects to insert at the end of each element in the set of matched elements. + */ + append(content1: JQuery|any[]|Element|Text|string, ...content2: any[]): JQuery; + /** + * Insert content, specified by the parameter, to the end of each element in the set of matched elements. + * + * param func A function that returns an HTML string, DOM element(s), or jQuery object to insert at the end of each element in the set of matched elements. Receives the index position of the element in the set and the old HTML value of the element as arguments. Within the function, this refers to the current element in the set. + */ + append(func: (index: number, html: string) => string|Element|JQuery): JQuery; + + /** + * Insert every element in the set of matched elements to the end of the target. + * + * @param target A selector, element, HTML string, array of elements, or jQuery object; the matched set of elements will be inserted at the end of the element(s) specified by this parameter. + */ + appendTo(target: JQuery|any[]|Element|string): JQuery; + + /** + * Insert content, specified by the parameter, before each element in the set of matched elements. + * + * param content1 HTML string, DOM element, array of elements, or jQuery object to insert before each element in the set of matched elements. + * param content2 One or more additional DOM elements, arrays of elements, HTML strings, or jQuery objects to insert before each element in the set of matched elements. + */ + before(content1: JQuery|any[]|Element|Text|string, ...content2: any[]): JQuery; + /** + * Insert content, specified by the parameter, before each element in the set of matched elements. + * + * param func A function that returns an HTML string, DOM element(s), or jQuery object to insert before each element in the set of matched elements. Receives the index position of the element in the set as an argument. Within the function, this refers to the current element in the set. + */ + before(func: (index: number, html: string) => string|Element|JQuery): JQuery; + + /** + * Create a deep copy of the set of matched elements. + * + * param withDataAndEvents A Boolean indicating whether event handlers and data should be copied along with the elements. The default value is false. + * param deepWithDataAndEvents A Boolean indicating whether event handlers and data for all children of the cloned element should be copied. By default its value matches the first argument's value (which defaults to false). + */ + clone(withDataAndEvents?: boolean, deepWithDataAndEvents?: boolean): JQuery; + + /** + * Remove the set of matched elements from the DOM. + * + * param selector A selector expression that filters the set of matched elements to be removed. + */ + detach(selector?: string): JQuery; + + /** + * Remove all child nodes of the set of matched elements from the DOM. + */ + empty(): JQuery; + + /** + * Insert every element in the set of matched elements after the target. 
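+ *
+ * Illustrative usage sketch, not from the upstream docs ("#intro" is a hypothetical
+ * element): .insertAfter() reverses the roles of .after() - the matched set is the
+ * content being moved and the argument is the target.
+ *
+ *     $('<p>Footnote</p>').insertAfter('#intro');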
+ * + * param target A selector, element, array of elements, HTML string, or jQuery object; the matched set of elements will be inserted after the element(s) specified by this parameter. + */ + insertAfter(target: JQuery|any[]|Element|Text|string): JQuery; + + /** + * Insert every element in the set of matched elements before the target. + * + * param target A selector, element, array of elements, HTML string, or jQuery object; the matched set of elements will be inserted before the element(s) specified by this parameter. + */ + insertBefore(target: JQuery|any[]|Element|Text|string): JQuery; + + /** + * Insert content, specified by the parameter, to the beginning of each element in the set of matched elements. + * + * param content1 DOM element, array of elements, HTML string, or jQuery object to insert at the beginning of each element in the set of matched elements. + * param content2 One or more additional DOM elements, arrays of elements, HTML strings, or jQuery objects to insert at the beginning of each element in the set of matched elements. + */ + prepend(content1: JQuery|any[]|Element|Text|string, ...content2: any[]): JQuery; + /** + * Insert content, specified by the parameter, to the beginning of each element in the set of matched elements. + * + * param func A function that returns an HTML string, DOM element(s), or jQuery object to insert at the beginning of each element in the set of matched elements. Receives the index position of the element in the set and the old HTML value of the element as arguments. Within the function, this refers to the current element in the set. + */ + prepend(func: (index: number, html: string) => string|Element|JQuery): JQuery; + + /** + * Insert every element in the set of matched elements to the beginning of the target. + * + * @param target A selector, element, HTML string, array of elements, or jQuery object; the matched set of elements will be inserted at the beginning of the element(s) specified by this parameter. + */ + prependTo(target: JQuery|any[]|Element|string): JQuery; + + /** + * Remove the set of matched elements from the DOM. + * + * @param selector A selector expression that filters the set of matched elements to be removed. + */ + remove(selector?: string): JQuery; + + /** + * Replace each target element with the set of matched elements. + * + * @param target A selector string, jQuery object, DOM element, or array of elements indicating which element(s) to replace. + */ + replaceAll(target: JQuery|any[]|Element|string): JQuery; + + /** + * Replace each element in the set of matched elements with the provided new content and return the set of elements that was removed. + * + * param newContent The content to insert. May be an HTML string, DOM element, array of DOM elements, or jQuery object. + */ + replaceWith(newContent: JQuery|any[]|Element|Text|string): JQuery; + /** + * Replace each element in the set of matched elements with the provided new content and return the set of elements that was removed. + * + * param func A function that returns content with which to replace the set of matched elements. + */ + replaceWith(func: () => Element|JQuery): JQuery; + + /** + * Get the combined text contents of each element in the set of matched elements, including their descendants. + */ + text(): string; + /** + * Set the content of each element in the set of matched elements to the specified text. + * + * @param text The text to set as the content of each matched element. 
When Number or Boolean is supplied, it will be converted to a String representation. + */ + text(text: string|number|boolean): JQuery; + /** + * Set the content of each element in the set of matched elements to the specified text. + * + * @param func A function returning the text content to set. Receives the index position of the element in the set and the old text value as arguments. + */ + text(func: (index: number, text: string) => string): JQuery; + + /** + * Retrieve all the elements contained in the jQuery set, as an array. + */ + toArray(): any[]; + + /** + * Remove the parents of the set of matched elements from the DOM, leaving the matched elements in their place. + */ + unwrap(): JQuery; + + /** + * Wrap an HTML structure around each element in the set of matched elements. + * + * @param wrappingElement A selector, element, HTML string, or jQuery object specifying the structure to wrap around the matched elements. + */ + wrap(wrappingElement: JQuery|Element|string): JQuery; + /** + * Wrap an HTML structure around each element in the set of matched elements. + * + * @param func A callback function returning the HTML content or jQuery object to wrap around the matched elements. Receives the index position of the element in the set as an argument. Within the function, this refers to the current element in the set. + */ + wrap(func: (index: number) => string|JQuery): JQuery; + + /** + * Wrap an HTML structure around all elements in the set of matched elements. + * + * @param wrappingElement A selector, element, HTML string, or jQuery object specifying the structure to wrap around the matched elements. + */ + wrapAll(wrappingElement: JQuery|Element|string): JQuery; + wrapAll(func: (index: number) => string): JQuery; + + /** + * Wrap an HTML structure around the content of each element in the set of matched elements. + * + * @param wrappingElement An HTML snippet, selector expression, jQuery object, or DOM element specifying the structure to wrap around the content of the matched elements. + */ + wrapInner(wrappingElement: JQuery|Element|string): JQuery; + /** + * Wrap an HTML structure around the content of each element in the set of matched elements. + * + * @param func A callback function which generates a structure to wrap around the content of the matched elements. Receives the index position of the element in the set as an argument. Within the function, this refers to the current element in the set. + */ + wrapInner(func: (index: number) => string): JQuery; + + /** + * Iterate over a jQuery object, executing a function for each matched element. + * + * @param func A function to execute for each matched element. + */ + each(func: (index: number, elem: Element) => any): JQuery; + + /** + * Retrieve one of the elements matched by the jQuery object. + * + * @param index A zero-based integer indicating which element to retrieve. + */ + get(index: number): HTMLElement; + /** + * Retrieve the elements matched by the jQuery object. + */ + get(): any[]; + + /** + * Search for a given element from among the matched elements. + */ + index(): number; + /** + * Search for a given element from among the matched elements. + * + * @param selector A selector representing a jQuery collection in which to look for an element. + */ + index(selector: string|JQuery|Element): number; + + /** + * The number of elements in the jQuery object. + */ + length: number; + /** + * A selector representing selector passed to jQuery(), if any, when creating the original set. 
+ * version deprecated: 1.7, removed: 1.9 + */ + selector: string; + [index: string]: any; + [index: number]: HTMLElement; + + /** + * Add elements to the set of matched elements. + * + * @param selector A string representing a selector expression to find additional elements to add to the set of matched elements. + * @param context The point in the document at which the selector should begin matching; similar to the context argument of the $(selector, context) method. + */ + add(selector: string, context?: Element): JQuery; + /** + * Add elements to the set of matched elements. + * + * @param elements One or more elements to add to the set of matched elements. + */ + add(...elements: Element[]): JQuery; + /** + * Add elements to the set of matched elements. + * + * @param html An HTML fragment to add to the set of matched elements. + */ + add(html: string): JQuery; + /** + * Add elements to the set of matched elements. + * + * @param obj An existing jQuery object to add to the set of matched elements. + */ + add(obj: JQuery): JQuery; + + /** + * Get the children of each element in the set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + children(selector?: string): JQuery; + + /** + * For each element in the set, get the first element that matches the selector by testing the element itself and traversing up through its ancestors in the DOM tree. + * + * @param selector A string containing a selector expression to match elements against. + */ + closest(selector: string): JQuery; + /** + * For each element in the set, get the first element that matches the selector by testing the element itself and traversing up through its ancestors in the DOM tree. + * + * @param selector A string containing a selector expression to match elements against. + * @param context A DOM element within which a matching element may be found. If no context is passed in then the context of the jQuery set will be used instead. + */ + closest(selector: string, context?: Element): JQuery; + /** + * For each element in the set, get the first element that matches the selector by testing the element itself and traversing up through its ancestors in the DOM tree. + * + * @param obj A jQuery object to match elements against. + */ + closest(obj: JQuery): JQuery; + /** + * For each element in the set, get the first element that matches the selector by testing the element itself and traversing up through its ancestors in the DOM tree. + * + * @param element An element to match elements against. + */ + closest(element: Element): JQuery; + + /** + * Get an array of all the elements and selectors matched against the current element up through the DOM tree. + * + * @param selectors An array or string containing a selector expression to match elements against (can also be a jQuery object). + * @param context A DOM element within which a matching element may be found. If no context is passed in then the context of the jQuery set will be used instead. + */ + closest(selectors: any, context?: Element): any[]; + + /** + * Get the children of each element in the set of matched elements, including text and comment nodes. + */ + contents(): JQuery; + + /** + * End the most recent filtering operation in the current chain and return the set of matched elements to its previous state. + */ + end(): JQuery; + + /** + * Reduce the set of matched elements to the one at the specified index. 
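+ *
+ * Illustrative usage sketch, not from the upstream docs (hypothetical list markup):
+ *
+ *     $('li').eq(1);  // jQuery object holding only the second matched <li>
+ *     $('li').eq(-1); // jQuery object holding only the last matched <li>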
+ * + * @param index An integer indicating the 0-based position of the element. OR An integer indicating the position of the element, counting backwards from the last element in the set. + * + */ + eq(index: number): JQuery; + + /** + * Reduce the set of matched elements to those that match the selector or pass the function's test. + * + * @param selector A string containing a selector expression to match the current set of elements against. + */ + filter(selector: string): JQuery; + /** + * Reduce the set of matched elements to those that match the selector or pass the function's test. + * + * @param func A function used as a test for each element in the set. this is the current DOM element. + */ + filter(func: (index: number, element: Element) => any): JQuery; + /** + * Reduce the set of matched elements to those that match the selector or pass the function's test. + * + * @param element An element to match the current set of elements against. + */ + filter(element: Element): JQuery; + /** + * Reduce the set of matched elements to those that match the selector or pass the function's test. + * + * @param obj An existing jQuery object to match the current set of elements against. + */ + filter(obj: JQuery): JQuery; + + /** + * Get the descendants of each element in the current set of matched elements, filtered by a selector, jQuery object, or element. + * + * @param selector A string containing a selector expression to match elements against. + */ + find(selector: string): JQuery; + /** + * Get the descendants of each element in the current set of matched elements, filtered by a selector, jQuery object, or element. + * + * @param element An element to match elements against. + */ + find(element: Element): JQuery; + /** + * Get the descendants of each element in the current set of matched elements, filtered by a selector, jQuery object, or element. + * + * @param obj A jQuery object to match elements against. + */ + find(obj: JQuery): JQuery; + + /** + * Reduce the set of matched elements to the first in the set. + */ + first(): JQuery; + + /** + * Reduce the set of matched elements to those that have a descendant that matches the selector or DOM element. + * + * @param selector A string containing a selector expression to match elements against. + */ + has(selector: string): JQuery; + /** + * Reduce the set of matched elements to those that have a descendant that matches the selector or DOM element. + * + * @param contained A DOM element to match elements against. + */ + has(contained: Element): JQuery; + + /** + * Check the current matched set of elements against a selector, element, or jQuery object and return true if at least one of these elements matches the given arguments. + * + * @param selector A string containing a selector expression to match elements against. + */ + is(selector: string): boolean; + /** + * Check the current matched set of elements against a selector, element, or jQuery object and return true if at least one of these elements matches the given arguments. + * + * @param func A function used as a test for the set of elements. It accepts one argument, index, which is the element's index in the jQuery collection.Within the function, this refers to the current DOM element. + */ + is(func: (index: number, element: Element) => boolean): boolean; + /** + * Check the current matched set of elements against a selector, element, or jQuery object and return true if at least one of these elements matches the given arguments. 
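+ *
+ * Illustrative usage sketch, not from the upstream docs ("#subscribe" is a
+ * hypothetical checkbox): unlike most traversal methods, .is() returns a boolean.
+ *
+ *     var checked = $('#subscribe').is(':checked');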
+ * + * @param obj An existing jQuery object to match the current set of elements against. + */ + is(obj: JQuery): boolean; + /** + * Check the current matched set of elements against a selector, element, or jQuery object and return true if at least one of these elements matches the given arguments. + * + * @param elements One or more elements to match the current set of elements against. + */ + is(elements: any): boolean; + + /** + * Reduce the set of matched elements to the final one in the set. + */ + last(): JQuery; + + /** + * Pass each element in the current matched set through a function, producing a new jQuery object containing the return values. + * + * @param callback A function object that will be invoked for each element in the current set. + */ + map(callback: (index: number, domElement: Element) => any): JQuery; + + /** + * Get the immediately following sibling of each element in the set of matched elements. If a selector is provided, it retrieves the next sibling only if it matches that selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + next(selector?: string): JQuery; + + /** + * Get all following siblings of each element in the set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + nextAll(selector?: string): JQuery; + + /** + * Get all following siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object passed. + * + * @param selector A string containing a selector expression to indicate where to stop matching following sibling elements. + * @param filter A string containing a selector expression to match elements against. + */ + nextUntil(selector?: string, filter?: string): JQuery; + /** + * Get all following siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object passed. + * + * @param element A DOM node or jQuery object indicating where to stop matching following sibling elements. + * @param filter A string containing a selector expression to match elements against. + */ + nextUntil(element?: Element, filter?: string): JQuery; + /** + * Get all following siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object passed. + * + * @param obj A DOM node or jQuery object indicating where to stop matching following sibling elements. + * @param filter A string containing a selector expression to match elements against. + */ + nextUntil(obj?: JQuery, filter?: string): JQuery; + + /** + * Remove elements from the set of matched elements. + * + * @param selector A string containing a selector expression to match elements against. + */ + not(selector: string): JQuery; + /** + * Remove elements from the set of matched elements. + * + * @param func A function used as a test for each element in the set. this is the current DOM element. + */ + not(func: (index: number, element: Element) => boolean): JQuery; + /** + * Remove elements from the set of matched elements. + * + * @param elements One or more DOM elements to remove from the matched set. + */ + not(...elements: Element[]): JQuery; + /** + * Remove elements from the set of matched elements. + * + * @param obj An existing jQuery object to match the current set of elements against. + */ + not(obj: JQuery): JQuery; + + /** + * Get the closest ancestor element that is positioned. 
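+ *
+ * Illustrative usage sketch, not from the upstream docs (assumes a hypothetical
+ * "#badge" element nested inside a div styled with position: relative):
+ *
+ *     $('#badge').offsetParent(); // the position: relative ancestor, not the direct parent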
+ */ + offsetParent(): JQuery; + + /** + * Get the parent of each element in the current set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + parent(selector?: string): JQuery; + + /** + * Get the ancestors of each element in the current set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + parents(selector?: string): JQuery; + + /** + * Get the ancestors of each element in the current set of matched elements, up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param selector A string containing a selector expression to indicate where to stop matching ancestor elements. + * @param filter A string containing a selector expression to match elements against. + */ + parentsUntil(selector?: string, filter?: string): JQuery; + /** + * Get the ancestors of each element in the current set of matched elements, up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param element A DOM node or jQuery object indicating where to stop matching ancestor elements. + * @param filter A string containing a selector expression to match elements against. + */ + parentsUntil(element?: Element, filter?: string): JQuery; + /** + * Get the ancestors of each element in the current set of matched elements, up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param obj A DOM node or jQuery object indicating where to stop matching ancestor elements. + * @param filter A string containing a selector expression to match elements against. + */ + parentsUntil(obj?: JQuery, filter?: string): JQuery; + + /** + * Get the immediately preceding sibling of each element in the set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + prev(selector?: string): JQuery; + + /** + * Get all preceding siblings of each element in the set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + prevAll(selector?: string): JQuery; + + /** + * Get all preceding siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param selector A string containing a selector expression to indicate where to stop matching preceding sibling elements. + * @param filter A string containing a selector expression to match elements against. + */ + prevUntil(selector?: string, filter?: string): JQuery; + /** + * Get all preceding siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param element A DOM node or jQuery object indicating where to stop matching preceding sibling elements. + * @param filter A string containing a selector expression to match elements against. + */ + prevUntil(element?: Element, filter?: string): JQuery; + /** + * Get all preceding siblings of each element up to but not including the element matched by the selector, DOM node, or jQuery object. + * + * @param obj A DOM node or jQuery object indicating where to stop matching preceding sibling elements. + * @param filter A string containing a selector expression to match elements against. 
+ */ + prevUntil(obj?: JQuery, filter?: string): JQuery; + + /** + * Get the siblings of each element in the set of matched elements, optionally filtered by a selector. + * + * @param selector A string containing a selector expression to match elements against. + */ + siblings(selector?: string): JQuery; + + /** + * Reduce the set of matched elements to a subset specified by a range of indices. + * + * @param start An integer indicating the 0-based position at which the elements begin to be selected. If negative, it indicates an offset from the end of the set. + * @param end An integer indicating the 0-based position at which the elements stop being selected. If negative, it indicates an offset from the end of the set. If omitted, the range continues until the end of the set. + */ + slice(start: number, end?: number): JQuery; + + /** + * Show the queue of functions to be executed on the matched elements. + * + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + */ + queue(queueName?: string): any[]; + /** + * Manipulate the queue of functions to be executed, once for each matched element. + * + * @param newQueue An array of functions to replace the current queue contents. + */ + queue(newQueue: Function[]): JQuery; + /** + * Manipulate the queue of functions to be executed, once for each matched element. + * + * @param callback The new function to add to the queue, with a function to call that will dequeue the next item. + */ + queue(callback: Function): JQuery; + /** + * Manipulate the queue of functions to be executed, once for each matched element. + * + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + * @param newQueue An array of functions to replace the current queue contents. + */ + queue(queueName: string, newQueue: Function[]): JQuery; + /** + * Manipulate the queue of functions to be executed, once for each matched element. + * + * @param queueName A string containing the name of the queue. Defaults to fx, the standard effects queue. + * @param callback The new function to add to the queue, with a function to call that will dequeue the next item. + */ + queue(queueName: string, callback: Function): JQuery; +} +declare module "jquery" { + export = $; +} +declare var jQuery: JQueryStatic; +declare var $: JQueryStatic; diff --git a/public/app/headers/lodash/lodash.d.ts b/public/app/headers/lodash/lodash.d.ts new file mode 100644 index 0000000000000..b6c95710bd9de --- /dev/null +++ b/public/app/headers/lodash/lodash.d.ts @@ -0,0 +1,8521 @@ +// Type definitions for Lo-Dash +// Project: http://lodash.com/ +// Definitions by: Brian Zengel , Ilya Mochalov +// Definitions: https://github.com/borisyankov/DefinitelyTyped + +declare var _: _.LoDashStatic; + +declare module _ { + interface LoDashStatic { + /** + * Creates a lodash object which wraps the given value to enable intuitive method chaining. + * + * In addition to Lo-Dash methods, wrappers also have the following Array methods: + * concat, join, pop, push, reverse, shift, slice, sort, splice, and unshift + * + * Chaining is supported in custom builds as long as the value method is implicitly or + * explicitly included in the build. 
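+ *
+ * Illustrative usage sketch, not from the upstream docs: wrapping an array,
+ * chaining a method, then unwrapping the result with value().
+ *
+ *     _([1, 2, 3, 4]).filter(function (n) { return n % 2 === 0; }).value(); // [2, 4]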
+ * + * The chainable wrapper functions are: + * after, assign, bind, bindAll, bindKey, chain, chunk, compact, compose, concat, countBy, + * createCallback, curry, debounce, defaults, defer, delay, difference, filter, flatten, + * forEach, forEachRight, forIn, forInRight, forOwn, forOwnRight, functions, groupBy, + * indexBy, initial, intersection, invert, invoke, keys, map, max, memoize, merge, min, + * object, omit, once, pairs, partial, partialRight, pick, pluck, pull, push, range, reject, + * remove, rest, reverse, sample, shuffle, slice, sort, sortBy, splice, tap, throttle, times, + * toArray, transform, union, uniq, unshift, unzip, values, where, without, wrap, and zip + * + * The non-chainable wrapper functions are: + * clone, cloneDeep, contains, escape, every, find, findIndex, findKey, findLast, + * findLastIndex, findLastKey, has, identity, indexOf, isArguments, isArray, isBoolean, + * isDate, isElement, isEmpty, isEqual, isFinite, isFunction, isNaN, isNull, isNumber, + * isObject, isPlainObject, isRegExp, isString, isUndefined, join, lastIndexOf, mixin, + * noConflict, parseInt, pop, random, reduce, reduceRight, result, shift, size, some, + * sortedIndex, runInContext, template, unescape, uniqueId, and value + * + * The wrapper functions first and last return wrapped values when n is provided, otherwise + * they return unwrapped values. + * + * Explicit chaining can be enabled by using the _.chain method. + **/ + (value: number): LoDashWrapper; + (value: string): LoDashStringWrapper; + (value: boolean): LoDashWrapper; + (value: Array): LoDashNumberArrayWrapper; + (value: Array): LoDashArrayWrapper; + (value: T): LoDashObjectWrapper; + (value: any): LoDashWrapper; + + /** + * The semantic version number. + **/ + VERSION: string; + + /** + * An object used to flag environments features. + **/ + support: Support; + + /** + * By default, the template delimiters used by Lo-Dash are similar to those in embedded Ruby + * (ERB). Change the following template settings to use alternative delimiters. + **/ + templateSettings: TemplateSettings; + } + + /** + * By default, the template delimiters used by Lo-Dash are similar to those in embedded Ruby + * (ERB). Change the following template settings to use alternative delimiters. + **/ + interface TemplateSettings { + /** + * The "escape" delimiter. + **/ + escape?: RegExp; + + /** + * The "evaluate" delimiter. + **/ + evaluate?: RegExp; + + /** + * An object to import into the template as local variables. + **/ + imports?: Dictionary; + + /** + * The "interpolate" delimiter. + **/ + interpolate?: RegExp; + + /** + * Used to reference the data object in the template text. + **/ + variable?: string; + } + + /** + * Creates a cache object to store key/value pairs. + */ + interface MapCache { + /** + * Removes `key` and its value from the cache. + * @param key The key of the value to remove. + * @return Returns `true` if the entry was removed successfully, else `false`. + */ + delete(key: string): boolean; + + /** + * Gets the cached value for `key`. + * @param key The key of the value to get. + * @return Returns the cached value. + */ + get(key: string): any; + + /** + * Checks if a cached value for `key` exists. + * @param key The key of the entry to check. + * @return Returns `true` if an entry for `key` exists, else `false`. + */ + has(key: string): boolean; + + /** + * Sets `value` to `key` of the cache. + * @param key The key of the value to cache. + * @param value The value to cache. + * @return Returns the cache object. 
+ */ + set(key: string, value: any): _.Dictionary; + } + + /** + * An object used to flag environments features. + **/ + interface Support { + /** + * Detect if an arguments object's [[Class]] is resolvable (all but Firefox < 4, IE < 9). + **/ + argsClass: boolean; + + /** + * Detect if arguments objects are Object objects (all but Narwhal and Opera < 10.5). + **/ + argsObject: boolean; + + /** + * Detect if name or message properties of Error.prototype are enumerable by default. + * (IE < 9, Safari < 5.1) + **/ + enumErrorProps: boolean; + + /** + * Detect if prototype properties are enumerable by default. + * + * Firefox < 3.6, Opera > 9.50 - Opera < 11.60, and Safari < 5.1 (if the prototype or a property on the + * prototype has been set) incorrectly set the [[Enumerable]] value of a function’s prototype property to true. + **/ + enumPrototypes: boolean; + + /** + * Detect if Function#bind exists and is inferred to be fast (all but V8). + **/ + fastBind: boolean; + + /** + * Detect if functions can be decompiled by Function#toString (all but PS3 and older Opera + * mobile browsers & avoided in Windows 8 apps). + **/ + funcDecomp: boolean; + + /** + * Detect if Function#name is supported (all but IE). + **/ + funcNames: boolean; + + /** + * Detect if arguments object indexes are non-enumerable (Firefox < 4, IE < 9, PhantomJS, + * Safari < 5.1). + **/ + nonEnumArgs: boolean; + + /** + * Detect if properties shadowing those on Object.prototype are non-enumerable. + * + * In IE < 9 an objects own properties, shadowing non-enumerable ones, are made + * non-enumerable as well (a.k.a the JScript [[DontEnum]] bug). + **/ + nonEnumShadows: boolean; + + /** + * Detect if own properties are iterated after inherited properties (all but IE < 9). + **/ + ownLast: boolean; + + /** + * Detect if Array#shift and Array#splice augment array-like objects correctly. + * + * Firefox < 10, IE compatibility mode, and IE < 9 have buggy Array shift() and splice() + * functions that fail to remove the last element, value[0], of array-like objects even + * though the length property is set to 0. The shift() method is buggy in IE 8 compatibility + * mode, while splice() is buggy regardless of mode in IE < 9 and buggy in compatibility mode + * in IE 9. + **/ + spliceObjects: boolean; + + /** + * Detect lack of support for accessing string characters by index. + * + * IE < 8 can't access characters by index and IE 8 can only access characters by index on + * string literals. + **/ + unindexedChars: boolean; + } + + interface LoDashWrapperBase { + /** + * Produces the toString result of the wrapped value. + * @return Returns the string result. + **/ + toString(): string; + + /** + * Executes the chained sequence to extract the unwrapped value. + * @return Returns the resolved unwrapped value. 
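+ *
+ * Illustrative usage sketch, not from the upstream docs:
+ *
+ *     _([1, 2, 3]).map(function (n) { return n * 2; }).value(); // [2, 4, 6]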
+ **/ + value(): T; + + /** + * @see _.value + **/ + run(): T; + + /** + * @see _.value + **/ + toJSON(): T; + + /** + * @see _.value + **/ + valueOf(): T; + + /** + * @see _.toPlainObject + */ + toPlainObject(): Object; + } + + interface LoDashWrapper extends LoDashWrapperBase> { } + + interface LoDashStringWrapper extends LoDashWrapper { } + + interface LoDashObjectWrapper extends LoDashWrapperBase> { } + + interface LoDashArrayWrapper extends LoDashWrapperBase> { + concat(...items: Array>): LoDashArrayWrapper; + join(seperator?: string): string; + pop(): T; + push(...items: T[]): LoDashArrayWrapper; + reverse(): LoDashArrayWrapper; + shift(): T; + slice(start: number, end?: number): LoDashArrayWrapper; + sort(compareFn?: (a: T, b: T) => number): LoDashArrayWrapper; + splice(start: number): LoDashArrayWrapper; + splice(start: number, deleteCount: number, ...items: any[]): LoDashArrayWrapper; + unshift(...items: T[]): LoDashArrayWrapper; + } + + interface LoDashNumberArrayWrapper extends LoDashArrayWrapper { } + + //_.chain + interface LoDashStatic { + /** + * Creates a lodash object that wraps the given value with explicit method chaining enabled. + * @param value The value to wrap. + * @return The wrapper object. + **/ + chain(value: number): LoDashWrapper; + chain(value: string): LoDashWrapper; + chain(value: boolean): LoDashWrapper; + chain(value: Array): LoDashArrayWrapper; + chain(value: T): LoDashObjectWrapper; + chain(value: any): LoDashWrapper; + } + + interface LoDashWrapperBase { + /** + * Enables explicit method chaining on the wrapper object. + * @see _.chain + * @return The wrapper object. + **/ + chain(): TWrapper; + } + + //_.tap + interface LoDashStatic { + /** + * Invokes interceptor with the value as the first argument and then returns value. The + * purpose of this method is to "tap into" a method chain in order to perform operations on + * intermediate results within the chain. + * @param value The value to provide to interceptor + * @param interceptor The function to invoke. + * @return value + **/ + tap( + value: T, + interceptor: (value: T) => void): T; + } + + interface LoDashWrapperBase { + /** + * @see _.tap + **/ + tap(interceptor: (value: T) => void): TWrapper; + } + + /********* + * Arrays * + **********/ + + //_.chunk + interface LoDashStatic { + /** + * Creates an array of elements split into groups the length of size. If collection can't be + * split evenly, the final chunk will be the remaining elements. + * @param array The array to process. + * @param size The length of each chunk. + * @return Returns the new array containing chunks. + **/ + chunk(array: Array, size?: number): T[][]; + + /** + * @see _.chunk + **/ + chunk(array: List, size?: number): T[][]; + } + + interface LoDashArrayWrapper { + /** + * @see _.chunk + **/ + chunk(size?: number): LoDashArrayWrapper; + } + + //_.compact + interface LoDashStatic { + /** + * Returns a copy of the array with all falsy values removed. In JavaScript, false, null, 0, "", + * undefined and NaN are all falsy. + * @param array Array to compact. + * @return (Array) Returns a new array of filtered values. + **/ + compact(array?: Array): T[]; + + /** + * @see _.compact + **/ + compact(array?: List): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.compact + **/ + compact(): LoDashArrayWrapper; + } + + //_.difference + interface LoDashStatic { + /** + * Creates an array excluding all values of the provided arrays using strict equality for comparisons + * , i.e. ===. 
+ * @param array The array to process + * @param others The arrays of values to exclude. + * @return Returns a new array of filtered values. + **/ + difference( + array?: Array, + ...others: Array[]): T[]; + /** + * @see _.difference + **/ + difference( + array?: List, + ...others: List[]): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.difference + **/ + difference( + ...others: Array[]): LoDashArrayWrapper; + /** + * @see _.difference + **/ + difference( + ...others: List[]): LoDashArrayWrapper; + } + + //_.findIndex + interface LoDashStatic { + /** + * This method is like _.find except that it returns the index of the first element that passes + * the callback check, instead of the element itself. + * @param array The array to search. + * @param {(Function|Object|string)} callback The function called per iteration. If a property name or object is provided it will be + * used to create a ".pluck" or ".where" style callback, respectively. + * @param thisArg The this binding of callback. + * @return Returns the index of the found element, else -1. + **/ + findIndex( + array: Array, + callback: ListIterator, + thisArg?: any): number; + + /** + * @see _.findIndex + **/ + findIndex( + array: List, + callback: ListIterator, + thisArg?: any): number; + + /** + * @see _.findIndex + **/ + findIndex( + array: Array, + pluckValue: string): number; + + /** + * @see _.findIndex + **/ + findIndex( + array: List, + pluckValue: string): number; + + /** + * @see _.findIndex + **/ + findIndex( + array: Array, + whereDictionary: W): number; + + /** + * @see _.findIndex + **/ + findIndex( + array: List, + whereDictionary: W): number; + } + + //_.findLastIndex + interface LoDashStatic { + /** + * This method is like _.findIndex except that it iterates over elements of a collection from right to left. + * @param array The array to search. + * @param {(Function|Object|string)} callback The function called per iteration. If a property name or object is provided it will be + * used to create a ".pluck" or ".where" style callback, respectively. + * @param thisArg The this binding of callback. + * @return Returns the index of the found element, else -1. + **/ + findLastIndex( + array: Array, + callback: ListIterator, + thisArg?: any): number; + + /** + * @see _.findLastIndex + **/ + findLastIndex( + array: List, + callback: ListIterator, + thisArg?: any): number; + + /** + * @see _.findLastIndex + **/ + findLastIndex( + array: Array, + pluckValue: string): number; + + /** + * @see _.findLastIndex + **/ + findLastIndex( + array: List, + pluckValue: string): number; + + /** + * @see _.findLastIndex + **/ + findLastIndex( + array: Array, + whereDictionary: Dictionary): number; + + /** + * @see _.findLastIndex + **/ + findLastIndex( + array: List, + whereDictionary: Dictionary): number; + } + + //_.first + interface LoDashStatic { + /** + * Gets the first element or first n elements of an array. If a callback is provided + * elements at the beginning of the array are returned as long as the callback returns + * truey. The callback is bound to thisArg and invoked with three arguments; (value, + * index, array). + * + * If a property name is provided for callback the created "_.pluck" style callback + * will return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return ] + * true for elements that have the properties of the given object, else false. + * @param array Retrieves the first element of this array. 
+ * @return Returns the first element of `array`. + **/ + first(array?: Array): T; + + /** + * @see _.first + **/ + first(array?: List): T; + + /** + * @see _.first + * @param n The number of elements to return. + **/ + first( + array: Array, + n: number): T[]; + + /** + * @see _.first + * @param n The number of elements to return. + **/ + first( + array: List, + n: number): T[]; + + /** + * @see _.first + * @param callback The function called per element. + * @param [thisArg] The this binding of callback. + **/ + first( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.first + * @param callback The function called per element. + * @param [thisArg] The this binding of callback. + **/ + first( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.first + * @param pluckValue "_.pluck" style callback value + **/ + first( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.first + * @param pluckValue "_.pluck" style callback value + **/ + first( + array: List, + pluckValue: string): T[]; + + /** + * @see _.first + * @param whereValue "_.where" style callback value + **/ + first( + array: Array, + whereValue: W): T[]; + + /** + * @see _.first + * @param whereValue "_.where" style callback value + **/ + first( + array: List, + whereValue: W): T[]; + + /** + * @see _.first + **/ + head(array: Array): T; + + /** + * @see _.first + **/ + head(array: List): T; + + /** + * @see _.first + **/ + head( + array: Array, + n: number): T[]; + + /** + * @see _.first + **/ + head( + array: List, + n: number): T[]; + + /** + * @see _.first + **/ + head( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.first + **/ + head( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.first + **/ + head( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.first + **/ + head( + array: List, + pluckValue: string): T[]; + + /** + * @see _.first + **/ + head( + array: Array, + whereValue: W): T[]; + + /** + * @see _.first + **/ + head( + array: List, + whereValue: W): T[]; + + /** + * @see _.first + **/ + take(array: Array): T[]; + + /** + * @see _.first + **/ + take(array: List): T[]; + + /** + * @see _.first + **/ + take( + array: Array, + n: number): T[]; + + /** + * @see _.first + **/ + take( + array: List, + n: number): T[]; + + /** + * Takes the first items from an array or list based on a predicate + * @param array The array or list of items on which the result set will be based + * @param predicate A predicate function to determine whether a value will be taken. Optional; defaults to identity. + * @param [thisArg] The this binding of predicate. 
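+ *
+ * Illustrative usage sketch, not from the upstream docs: elements are taken from
+ * the front of the array until the predicate first returns a falsey value.
+ *
+ *     _.takeWhile([1, 2, 3, 1], function (n) { return n < 3; }); // [1, 2]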
+ */ + takeWhile( + array: (Array|List), + predicate?: ListIterator, + thisArg?: any + ): T[]; + + /** + * Takes the first items from an array or list based on a predicate + * @param array The array or list of items on which the result set will be based + * @param pluckValue Uses a _.property style callback to return the property value of the given element + */ + takeWhile( + array: (Array|List), + pluckValue: string + ): any[]; + + /** + * Takes the first items from an array or list based on a predicate + * @param array The array or list of items on which the result set will be based + * @param whereValue Uses a _.matches style callback to return the first elements that match the given value + */ + takeWhile( + array: (Array|List), + whereValue: W + ): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.first + **/ + first(): T; + + /** + * @see _.first + * @param n The number of elements to return. + **/ + first(n: number): LoDashArrayWrapper; + + /** + * @see _.first + * @param callback The function called per element. + * @param [thisArg] The this binding of callback. + **/ + first( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.first + * @param pluckValue "_.pluck" style callback value + **/ + first(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.first + * @param whereValue "_.where" style callback value + **/ + first(whereValue: W): LoDashArrayWrapper; + + /** + * @see _.first + **/ + head(): T; + + /** + * @see _.first + * @param n The number of elements to return. + **/ + head(n: number): LoDashArrayWrapper; + + /** + * @see _.first + * @param callback The function called per element. + * @param [thisArg] The this binding of callback. + **/ + head( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.first + * @param pluckValue "_.pluck" style callback value + **/ + head(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.first + * @param whereValue "_.where" style callback value + **/ + head(whereValue: W): LoDashArrayWrapper; + + /** + * @see _.first + **/ + take(): LoDashArrayWrapper; + + /** + * @see _.first + * @param n The number of elements to return. + **/ + take(n: number): LoDashArrayWrapper; + + /** + * Takes the first items based on a predicate + * @param predicate The function called per element. + * @param [thisArg] The this binding of callback. + **/ + takeWhile( + predicate: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * Takes the first items based on a predicate + * @param pluckValue Uses a _.property style callback to return the property value of the given element + **/ + takeWhile( + pluckValue: string): LoDashArrayWrapper; + + /** + * Takes the first items based on a predicate + * @param whereValue Uses a _.matches style callback to return the first elements that match the given value + **/ + takeWhile( + whereValue: W): LoDashArrayWrapper; + + } + + interface MaybeNestedList extends List> { } + interface RecursiveList extends List> { } + + //_.flatten + interface LoDashStatic { + /** + * Flattens a nested array a single level. + * + * _.flatten(x) is equivalent to _.flatten(x, false); + * + * @param array The array to flatten. + * @return `array` flattened. + **/ + flatten(array: MaybeNestedList): T[]; + + /** + * Flattens a nested array. If isDeep is true the array is recursively flattened, otherwise it is only + * flattened a single level. 
+ * + * If you know whether or not this should be recursively at compile time, you typically want to use a + * version without a boolean parameter (i.e. `_.flatten(x)` or `_.flattenDeep(x)`). + * + * @param array The array to flatten. + * @param deep Specify a deep flatten. + * @return `array` flattened. + **/ + flatten(array: RecursiveList, isDeep: boolean): List | RecursiveList; + + /** + * Recursively flattens a nested array. + * + * _.flattenDeep(x) is equivalent to _.flatten(x, true); + * + * @param array The array to flatten + * @return `array` recursively flattened + */ + flattenDeep(array: RecursiveList): List + } + + interface LoDashArrayWrapper { + /** + * @see _.flatten + **/ + flatten(): LoDashArrayWrapper; + + /** + * @see _.flatten + **/ + flatten(isShallow: boolean): LoDashArrayWrapper; + + /** + * @see _.flattenDeep + */ + flattenDeep(): LoDashArrayWrapper; + } + + //_.indexOf + interface LoDashStatic { + /** + * Gets the index at which the first occurrence of value is found using strict equality + * for comparisons, i.e. ===. If the array is already sorted providing true for fromIndex + * will run a faster binary search. + * @param array The array to search. + * @param value The value to search for. + * @param fromIndex The index to search from. + * @return The index of `value` within `array`. + **/ + indexOf( + array: Array, + value: T): number; + + /** + * @see _.indexOf + **/ + indexOf( + array: List, + value: T): number; + + /** + * @see _.indexOf + * @param fromIndex The index to search from + **/ + indexOf( + array: Array, + value: T, + fromIndex: number): number; + + /** + * @see _.indexOf + * @param fromIndex The index to search from + **/ + indexOf( + array: List, + value: T, + fromIndex: number): number; + + /** + * @see _.indexOf + * @param isSorted True to perform a binary search on a sorted array. + **/ + indexOf( + array: Array, + value: T, + isSorted: boolean): number; + + /** + * @see _.indexOf + * @param isSorted True to perform a binary search on a sorted array. + **/ + indexOf( + array: List, + value: T, + isSorted: boolean): number; + } + + //_.initial + interface LoDashStatic { + /** + * Gets all but the last element or last n elements of an array. If a callback is provided + * elements at the end of the array are excluded from the result as long as the callback + * returns truey. The callback is bound to thisArg and invoked with three arguments; + * (value, index, array). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param array The array to query. + * @param n Leaves this many elements behind, optional. + * @return Returns everything but the last `n` elements of `array`. + **/ + initial( + array: Array): T[]; + + /** + * @see _.initial + **/ + initial( + array: List): T[]; + + /** + * @see _.initial + * @param n The number of elements to exclude. + **/ + initial( + array: Array, + n: number): T[]; + + /** + * @see _.initial + * @param n The number of elements to exclude. 
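+ *
+ * Illustrative usage sketch, not from the upstream docs:
+ *
+ *     _.initial([1, 2, 3]);    // [1, 2]
+ *     _.initial([1, 2, 3], 2); // [1]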
+ **/ + initial( + array: List, + n: number): T[]; + + /** + * @see _.initial + * @param callback The function called per element + **/ + initial( + array: Array, + callback: ListIterator): T[]; + + /** + * @see _.initial + * @param callback The function called per element + **/ + initial( + array: List, + callback: ListIterator): T[]; + + /** + * @see _.initial + * @param pluckValue _.pluck style callback + **/ + initial( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.initial + * @param pluckValue _.pluck style callback + **/ + initial( + array: List, + pluckValue: string): T[]; + + /** + * @see _.initial + * @param whereValue _.where style callback + **/ + initial( + array: Array, + whereValue: W): T[]; + + /** + * @see _.initial + * @param whereValue _.where style callback + **/ + initial( + array: List, + whereValue: W): T[]; + } + + //_.intersection + interface LoDashStatic { + /** + * Creates an array of unique values present in all provided arrays using strict + * equality for comparisons, i.e. ===. + * @param arrays The arrays to inspect. + * @return Returns an array of composite values. + **/ + intersection(...arrays: Array[]): T[]; + + /** + * @see _.intersection + **/ + intersection(...arrays: List[]): T[]; + } + + //_.last + interface LoDashStatic { + /** + * Gets the last element of an array. + * @param array The array to query. + * @return Returns the last element of array. + **/ + last(array: Array): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.last + **/ + last(): T; + } + + //_.lastIndexOf + interface LoDashStatic { + /** + * Gets the index at which the last occurrence of value is found using strict equality + * for comparisons, i.e. ===. If fromIndex is negative, it is used as the offset from the + * end of the collection. + * @param array The array to search. + * @param value The value to search for. + * @param fromIndex The index to search from. + * @return The index of the matched value or -1. + **/ + lastIndexOf( + array: Array, + value: T, + fromIndex?: number): number; + + /** + * @see _.lastIndexOf + **/ + lastIndexOf( + array: List, + value: T, + fromIndex?: number): number; + } + + //_.pull + interface LoDashStatic { + /** + * Removes all provided values from the given array using strict equality for comparisons, + * i.e. ===. + * @param array The array to modify. + * @param values The values to remove. + * @return array. + **/ + pull( + array: Array, + ...values: T[]): T[]; + + /** + * @see _.pull + **/ + pull( + array: List, + ...values: T[]): T[]; + } + + interface LoDashStatic { + /** + * Removes all provided values from the given array using strict equality for comparisons, + * i.e. ===. + * @param array The array to modify. + * @param values The values to remove. + * @return array. + **/ + pullAt( + array: Array, + ...values: any[]): any[]; + + /** + * @see _.pull + **/ + pullAt( + array: List, + ...values: any[]): any[]; + } + + //_.remove + interface LoDashStatic { + /** + * Removes all elements from an array that the callback returns truey for and returns + * an array of removed elements. The callback is bound to thisArg and invoked with three + * arguments; (value, index, array). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. 
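+ *
+ * Illustrative usage sketch, not from the upstream docs: _.remove mutates the array
+ * in place and returns the elements that were taken out.
+ *
+ *     var nums = [1, 2, 3, 4];
+ *     var evens = _.remove(nums, function (n) { return n % 2 === 0; });
+ *     // nums is now [1, 3], evens is [2, 4]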
+ * @param array The array to modify.
+ * @param callback The function called per iteration.
+ * @param thisArg The this binding of callback.
+ * @return A new array of removed elements.
+ **/
+ remove<T>(
+ array: Array<T>,
+ callback?: ListIterator<T, boolean>,
+ thisArg?: any): T[];
+
+ /**
+ * @see _.remove
+ **/
+ remove<T>(
+ array: List<T>,
+ callback?: ListIterator<T, boolean>,
+ thisArg?: any): T[];
+
+ /**
+ * @see _.remove
+ * @param pluckValue _.pluck style callback
+ **/
+ remove<T>(
+ array: Array<T>,
+ pluckValue?: string): T[];
+
+ /**
+ * @see _.remove
+ * @param pluckValue _.pluck style callback
+ **/
+ remove<T>(
+ array: List<T>,
+ pluckValue?: string): T[];
+
+ /**
+ * @see _.remove
+ * @param whereValue _.where style callback
+ **/
+ remove<W, T>(
+ array: Array<T>,
+ whereValue?: Dictionary<W>): T[];
+
+ /**
+ * @see _.remove
+ * @param whereValue _.where style callback
+ **/
+ remove<W, T>(
+ array: List<T>,
+ whereValue?: Dictionary<W>): T[];
+
+ /**
+ * @see _.remove
+ * @param item The item to remove
+ **/
+ remove<T>(
+ array: Array<T>,
+ item: T): T[];
+ }
+
+ //_.rest
+ interface LoDashStatic {
+ /**
+ * The opposite of _.initial; this method gets all but the first element or first n elements of
+ * an array. If a callback function is provided, elements at the beginning of the array are excluded
+ * from the result as long as the callback returns truey. The callback is bound to thisArg and
+ * invoked with three arguments; (value, index, array).
+ *
+ * If a property name is provided for callback the created "_.pluck" style callback will return
+ * the property value of the given element.
+ *
+ * If an object is provided for callback the created "_.where" style callback will return true
+ * for elements that have the properties of the given object, else false.
+ * @param array The array to query.
+ * @param {(Function|Object|number|string)} [callback=1] The function called per element or the number
+ * of elements to exclude. If a property name or object is provided it will be used to create a
+ * "_.pluck" or "_.where" style callback, respectively.
+ * @param {*} [thisArg] The this binding of callback.
+ * @return Returns a slice of array.
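+ *
+ * @example
+ * // illustrative usage:
+ * _.rest([1, 2, 3]); // => [2, 3]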
+ **/ + rest(array: Array): T[]; + + /** + * @see _.rest + **/ + rest(array: List): T[]; + + /** + * @see _.rest + **/ + rest( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + rest( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + rest( + array: Array, + n: number): T[]; + + /** + * @see _.rest + **/ + rest( + array: List, + n: number): T[]; + + /** + * @see _.rest + **/ + rest( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + rest( + array: List, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + rest( + array: Array, + whereValue: W): T[]; + + /** + * @see _.rest + **/ + rest( + array: List, + whereValue: W): T[]; + + /** + * @see _.rest + **/ + drop(array: Array): T[]; + + /** + * @see _.rest + **/ + drop(array: List): T[]; + + /** + * @see _.rest + **/ + drop( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + drop( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + drop( + array: Array, + n: number): T[]; + + /** + * @see _.rest + **/ + drop( + array: List, + n: number): T[]; + + /** + * @see _.rest + **/ + drop( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + drop( + array: List, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + drop( + array: Array, + whereValue: W): T[]; + + /** + * @see _.rest + **/ + drop( + array: List, + whereValue: W): T[]; + + /** + * @see _.rest + **/ + tail(array: Array): T[]; + + /** + * @see _.rest + **/ + tail(array: List): T[]; + + /** + * @see _.rest + **/ + tail( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + tail( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.rest + **/ + tail( + array: Array, + n: number): T[]; + + /** + * @see _.rest + **/ + tail( + array: List, + n: number): T[]; + + /** + * @see _.rest + **/ + tail( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + tail( + array: List, + pluckValue: string): T[]; + + /** + * @see _.rest + **/ + tail( + array: Array, + whereValue: W): T[]; + + /** + * @see _.rest + **/ + tail( + array: List, + whereValue: W): T[]; + } + + //_.sortedIndex + interface LoDashStatic { + /** + * Uses a binary search to determine the smallest index at which a value should be inserted + * into a given sorted array in order to maintain the sort order of the array. If a callback + * is provided it will be executed for value and each element of array to compute their sort + * ranking. The callback is bound to thisArg and invoked with one argument; (value). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param array The sorted list. + * @param value The value to determine its index within `list`. + * @param callback Iterator to compute the sort ranking of each value, optional. + * @return The index at which value should be inserted into array. 
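+ *
+ * @example
+ * // illustrative usage:
+ * _.sortedIndex([20, 30, 50], 40); // => 2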
+ **/ + sortedIndex( + array: Array, + value: T, + callback?: (x: T) => TSort, + thisArg?: any): number; + + /** + * @see _.sortedIndex + **/ + sortedIndex( + array: List, + value: T, + callback?: (x: T) => TSort, + thisArg?: any): number; + + /** + * @see _.sortedIndex + * @param pluckValue the _.pluck style callback + **/ + sortedIndex( + array: Array, + value: T, + pluckValue: string): number; + + /** + * @see _.sortedIndex + * @param pluckValue the _.pluck style callback + **/ + sortedIndex( + array: List, + value: T, + pluckValue: string): number; + + /** + * @see _.sortedIndex + * @param pluckValue the _.where style callback + **/ + sortedIndex( + array: Array, + value: T, + whereValue: W): number; + + /** + * @see _.sortedIndex + * @param pluckValue the _.where style callback + **/ + sortedIndex( + array: List, + value: T, + whereValue: W): number; + } + + //_.union + interface LoDashStatic { + /** + * Creates an array of unique values, in order, of the provided arrays using strict + * equality for comparisons, i.e. ===. + * @param arrays The arrays to inspect. + * @return Returns an array of composite values. + **/ + union(...arrays: Array[]): T[]; + + /** + * @see _.union + **/ + union(...arrays: List[]): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.union + **/ + union(...arrays: (Array|List)[]): LoDashArrayWrapper; + } + + //_.uniq + interface LoDashStatic { + /** + * Creates a duplicate-value-free version of an array using strict equality for comparisons, + * i.e. ===. If the array is sorted, providing true for isSorted will use a faster algorithm. + * If a callback is provided each element of array is passed through the callback before + * uniqueness is computed. The callback is bound to thisArg and invoked with three arguments; + * (value, index, array). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param array Array to remove duplicates from. + * @param isSorted True if `array` is already sorted, optiona, default = false. + * @param iterator Transform the elements of `array` before comparisons for uniqueness. + * @param context 'this' object in `iterator`, optional. + * @return Copy of `array` where all elements are unique. 
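+ *
+ * @example
+ * // illustrative usage:
+ * _.uniq([2, 1, 2]); // => [2, 1]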
+ **/ + uniq(array: Array, isSorted?: boolean): T[]; + + /** + * @see _.uniq + **/ + uniq(array: List, isSorted?: boolean): T[]; + + /** + * @see _.uniq + **/ + uniq( + array: Array, + isSorted: boolean, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + uniq( + array: List, + isSorted: boolean, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + uniq( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + uniq( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq( + array: Array, + isSorted: boolean, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq( + array: List, + isSorted: boolean, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq( + array: List, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + array: Array, + isSorted: boolean, + whereValue: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + array: List, + isSorted: boolean, + whereValue: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + array: Array, + whereValue: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + array: List, + whereValue: W): T[]; + + /** + * @see _.uniq + **/ + unique(array: Array, isSorted?: boolean): T[]; + + /** + * @see _.uniq + **/ + unique(array: List, isSorted?: boolean): T[]; + + /** + * @see _.uniq + **/ + unique( + array: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + unique( + array: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + unique( + array: Array, + isSorted: boolean, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + **/ + unique( + array: List, + isSorted: boolean, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique( + array: Array, + isSorted: boolean, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique( + array: List, + isSorted: boolean, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique( + array: Array, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique( + array: List, + pluckValue: string): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + array: Array, + whereValue?: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + array: List, + whereValue?: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + array: Array, + isSorted: boolean, + whereValue?: W): T[]; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + array: List, + isSorted: boolean, + whereValue?: W): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.uniq + **/ + uniq(isSorted?: boolean): LoDashArrayWrapper; + + /** + * @see _.uniq + **/ + uniq( + isSorted: boolean, + 
callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.uniq + **/ + uniq( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq( + isSorted: boolean, + pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + uniq(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + isSorted: boolean, + whereValue: W): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + uniq( + whereValue: W): LoDashArrayWrapper; + + /** + * @see _.uniq + **/ + unique(isSorted?: boolean): LoDashArrayWrapper; + + /** + * @see _.uniq + **/ + unique( + isSorted: boolean, + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.uniq + **/ + unique( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique( + isSorted: boolean, + pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param pluckValue _.pluck style callback + **/ + unique(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + isSorted: boolean, + whereValue: W): LoDashArrayWrapper; + + /** + * @see _.uniq + * @param whereValue _.where style callback + **/ + unique( + whereValue: W): LoDashArrayWrapper; + } + + //_.without + interface LoDashStatic { + /** + * Creates an array excluding all provided values using strict equality for comparisons, i.e. ===. + * @param array The array to filter. + * @param values The value(s) to exclude. + * @return A new array of filtered values. + **/ + without( + array: Array, + ...values: T[]): T[]; + + /** + * @see _.without + **/ + without( + array: List, + ...values: T[]): T[]; + } + + //_.xor + interface LoDashStatic { + /** + * Creates an array of unique values that is the symmetric difference of the provided arrays. + * @param arrays The arrays to inspect. + * @return Returns the new array of values. + */ + xor(...arrays: List[]): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.xor + */ + xor(...arrays: T[][]): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.xor + */ + xor(...arrays: T[]): LoDashObjectWrapper; + } + + //_.zip + interface LoDashStatic { + /** + * Creates an array of grouped elements, the first of which contains the first + * elements of the given arrays, the second of which contains the second elements + * of the given arrays, and so on. + * @param arrays Arrays to process. + * @return A new array of grouped elements. + **/ + zip(...arrays: any[][]): any[][]; + + /** + * @see _.zip + **/ + zip(...arrays: any[]): any[]; + + /** + * @see _.zip + **/ + unzip(...arrays: any[][]): any[][]; + + /** + * @see _.zip + **/ + unzip(...arrays: any[]): any[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.zip + **/ + zip(...arrays: any[][]): _.LoDashArrayWrapper; + + /** + * @see _.zip + **/ + unzip(...arrays: any[]): _.LoDashArrayWrapper; + } + + //_.zipObject + interface LoDashStatic { + /** + * The inverse of _.pairs; this method returns an object composed from arrays of property + * names and values. Provide either a single two dimensional array, e.g. [[key1, value1], + * [key2, value2]] or two arrays, one of property names and one of corresponding values. 
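+ *
+ * @example
+ * // illustrative usage:
+ * _.zipObject(['fred', 'barney'], [30, 40]); // => { fred: 30, barney: 40 }
+ *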
+ * @param props The property names. + * @param values The property values. + * @return Returns the new object. + **/ + zipObject( + props: List, + values?: List): TResult; + + /** + * @see _.zipObject + **/ + zipObject(props: List>): Dictionary; + + /** + * @see _.zipObject + **/ + object( + props: List, + values?: List): TResult; + + /** + * @see _.zipObject + **/ + object(props: List>): Dictionary; + } + + interface LoDashArrayWrapper { + /** + * @see _.zipObject + **/ + zipObject(values?: List): _.LoDashObjectWrapper>; + + /** + * @see _.zipObject + **/ + object(values?: List): _.LoDashObjectWrapper>; + } + + //_.zipWith + interface LoDashStatic { + /** + * This method is like _.zip except that it accepts an iteratee to specify how grouped values should be + * combined. The iteratee is bound to thisArg and invoked with four arguments: (accumulator, value, index, + * group). + * @param {...Array} [arrays] The arrays to process. + * @param {Function} [iteratee] The function to combine grouped values. + * @param {*} [thisArg] The `this` binding of `iteratee`. + * @return Returns the new array of grouped elements. + */ + zipWith(...args: any[]): TResult[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.zipWith + */ + zipWith(...args: any[]): LoDashArrayWrapper; + } + + /********* + * Chain * + *********/ + + //_.thru + interface LoDashStatic { + /** + * This method is like _.tap except that it returns the result of interceptor. + * @param value The value to provide to interceptor. + * @param interceptor The function to invoke. + * @param thisArg The this binding of interceptor. + * @return Returns the result of interceptor. + */ + thru( + value: T, + interceptor: (value: T) => TResult, + thisArg?: any): TResult; + } + + interface LoDashWrapperBase { + /** + * @see _.thru + */ + thru( + interceptor: (value: T) => TResult, + thisArg?: any): LoDashWrapper; + + /** + * @see _.thru + */ + thru( + interceptor: (value: T) => TResult, + thisArg?: any): LoDashWrapper; + + /** + * @see _.thru + */ + thru( + interceptor: (value: T) => TResult, + thisArg?: any): LoDashWrapper; + + /** + * @see _.thru + */ + thru( + interceptor: (value: T) => TResult, + thisArg?: any): LoDashObjectWrapper; + + /** + * @see _.thru + */ + thru( + interceptor: (value: T) => TResult[], + thisArg?: any): LoDashArrayWrapper; + } + + /************** + * Collection * + **************/ + + //_.at + interface LoDashStatic { + /** + * Creates an array of elements corresponding to the given keys, or indexes, of collection. Keys may be + * specified as individual arguments or as arrays of keys. + * + * @param collection The collection to iterate over. + * @param props The property names or indexes of elements to pick, specified individually or in arrays. + * @return Returns the new array of picked elements. + */ + at( + collection: List|Dictionary, + ...props: Array> + ): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.at + */ + at(...props: Array>): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.at + */ + at(...props: Array>): LoDashArrayWrapper; + } + + //_.contains + interface LoDashStatic { + /** + * Checks if a given value is present in a collection using strict equality for comparisons, + * i.e. ===. If fromIndex is negative, it is used as the offset from the end of the collection. + * @param collection The collection to iterate over. + * @param target The value to check for. + * @param fromIndex The index to search from. 
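+ * @example
+ * // illustrative usage:
+ * _.contains([1, 2, 3], 1);    // => true
+ * _.contains('pebbles', 'eb'); // => true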
+ * @return True if the target element is found, else false. + **/ + contains( + collection: Array, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + contains( + collection: List, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + * @param dictionary The dictionary to iterate over. + * @param value The value in the dictionary to search for. + **/ + contains( + dictionary: Dictionary, + value: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + * @param searchString the string to search + * @param targetString the string to search for + **/ + contains( + searchString: string, + targetString: string, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include( + collection: Array, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include( + collection: List, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include( + dictionary: Dictionary, + value: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include( + searchString: string, + targetString: string, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes( + collection: Array, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes( + collection: List, + target: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes( + dictionary: Dictionary, + value: T, + fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes( + searchString: string, + targetString: string, + fromIndex?: number): boolean; + } + + interface LoDashArrayWrapper { + /** + * @see _.contains + **/ + contains(target: T, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include(target: T, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes(target: T, fromIndex?: number): boolean; + } + + interface LoDashObjectWrapper { + /** + * @see _.contains + **/ + contains(target: TValue, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include(target: TValue, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes(target: TValue, fromIndex?: number): boolean; + } + + interface LoDashStringWrapper { + /** + * @see _.contains + **/ + contains(target: string, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + include(target: string, fromIndex?: number): boolean; + + /** + * @see _.contains + **/ + includes(target: string, fromIndex?: number): boolean; + } + + //_.countBy + interface LoDashStatic { + /** + * Creates an object composed of keys generated from the results of running each element + * of collection through the callback. The corresponding value of each key is the number + * of times the key was returned by the callback. The callback is bound to thisArg and + * invoked with three arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return Returns the composed aggregate object. 
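+ *
+ * @example
+ * // illustrative usage:
+ * _.countBy([4.3, 6.1, 6.4], function(n) { return Math.floor(n); });
+ * // => { '4': 1, '6': 2 }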
+ **/ + countBy( + collection: Array, + callback?: ListIterator, + thisArg?: any): Dictionary; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + collection: List, + callback?: ListIterator, + thisArg?: any): Dictionary; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + collection: Array, + callback: string, + thisArg?: any): Dictionary; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + collection: List, + callback: string, + thisArg?: any): Dictionary; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + collection: Dictionary, + callback: string, + thisArg?: any): Dictionary; + } + + interface LoDashArrayWrapper { + /** + * @see _.countBy + **/ + countBy( + callback?: ListIterator, + thisArg?: any): LoDashObjectWrapper>; + + /** + * @see _.countBy + * @param callback Function name + **/ + countBy( + callback: string, + thisArg?: any): LoDashObjectWrapper>; + } + + //_.every + interface LoDashStatic { + /** + * Checks if the given callback returns truey value for all elements of a collection. + * The callback is bound to thisArg and invoked with three arguments; (value, index|key, + * collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return True if all elements passed the callback check, else false. 
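+ *
+ * @example
+ * // illustrative usage:
+ * _.every([true, 1, null, 'yes']); // => false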
+ **/ + every( + collection: Array, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + every( + collection: List, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + every( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + every( + collection: Array, + pluckValue: string): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + every( + collection: List, + pluckValue: string): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + every( + collection: Dictionary, + pluckValue: string): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + every( + collection: Array, + whereValue: W): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + every( + collection: List, + whereValue: W): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + every( + collection: Dictionary, + whereValue: W): boolean; + + /** + * @see _.every + **/ + all( + collection: Array, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.every + **/ + all( + collection: List, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.every + **/ + all( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + all( + collection: Array, + pluckValue: string): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + all( + collection: List, + pluckValue: string): boolean; + + /** + * @see _.every + * @param pluckValue _.pluck style callback + **/ + all( + collection: Dictionary, + pluckValue: string): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + all( + collection: Array, + whereValue: W): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + all( + collection: List, + whereValue: W): boolean; + + /** + * @see _.every + * @param whereValue _.where style callback + **/ + all( + collection: Dictionary, + whereValue: W): boolean; + } + + //_.fill + interface LoDashStatic { + /** + * Fills elements of array with value from start up to, but not including, end. + * + * Note: This method mutates array. + * + * @param array (Array): The array to fill. + * @param value (*): The value to fill array with. + * @param [start=0] (number): The start position. + * @param [end=array.length] (number): The end position. + * @return (Array): Returns array. + */ + fill( + array: any[], + value: any, + start?: number, + end?: number): TResult[]; + + /** + * @see _.fill + */ + fill( + array: List, + value: any, + start?: number, + end?: number): List; + } + + interface LoDashArrayWrapper { + /** + * @see _.fill + */ + fill( + value: TResult, + start?: number, + end?: number): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.fill + */ + fill( + value: TResult, + start?: number, + end?: number): LoDashObjectWrapper>; + } + + //_.filter + interface LoDashStatic { + /** + * Iterates over elements of a collection, returning an array of all elements the + * identity function returns truey for. + * + * @param collection The collection to iterate over. 
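+ * @example
+ * // illustrative usage:
+ * _.filter([1, 2, 3, 4], function(n) { return n % 2 == 0; }); // => [2, 4]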
+ * @return Returns a new array of elements that passed the callback check. + **/ + filter( + collection: (Array|List)): T[]; + + /** + * Iterates over elements of a collection, returning an array of all elements the + * callback returns truey for. The callback is bound to thisArg and invoked with three + * arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param context The this binding of callback. + * @return Returns a new array of elements that passed the callback check. + **/ + filter( + collection: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + **/ + filter( + collection: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + **/ + filter( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: Array, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: List, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: Dictionary, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: Array, + whereValue: W): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: List, + whereValue: W): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + collection: Dictionary, + whereValue: W): T[]; + + /** + * @see _.filter + **/ + select( + collection: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + **/ + select( + collection: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + **/ + select( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: Array, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: List, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: Dictionary, + pluckValue: string): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: Array, + whereValue: W): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: List, + whereValue: W): T[]; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + collection: Dictionary, + whereValue: W): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.filter + **/ + filter(): LoDashArrayWrapper; + + /** + * @see _.filter + **/ + filter( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + filter( + pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.filter 
+ * @param pluckValue _.pluck style callback + **/ + filter( + whereValue: W): LoDashArrayWrapper; + + /** + * @see _.filter + **/ + select( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.filter + * @param pluckValue _.pluck style callback + **/ + select( + whereValue: W): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.filter + **/ + filter( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper; + } + + //_.find + interface LoDashStatic { + /** + * Iterates over elements of collection, returning the first element predicate returns + * truthy for. The predicate is bound to thisArg and invoked with three arguments: + * (value, index|key, collection). + * + * If a property name is provided for predicate the created _.property style callback + * returns the property value of the given element. + * + * If a value is also provided for thisArg the created _.matchesProperty style callback + * returns true for elements that have a matching property value, else false. + * + * If an object is provided for predicate the created _.matches style callback returns + * true for elements that have the properties of the given object, else false. + * + * @param collection Searches for a value in this list. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return The found element, else undefined. + **/ + find( + collection: Array, + callback: ListIterator, + thisArg?: any): T; + + /** + * Alias of _.find + * @see _.find + **/ + detect( + collection: Array, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + find( + collection: List, + callback: ListIterator, + thisArg?: any): T; + + /** + * Alias of _.find + * @see _.find + **/ + detect( + collection: List, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + find( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T; + + /** + * Alias of _.find + * @see _.find + **/ + detect( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T; + + /** + * @see _.find + * @param _.matches style callback + **/ + find( + collection: Array|List|Dictionary, + whereValue: W): T; + + /** + * Alias of _.find + * @see _.find + * @param _.matches style callback + **/ + detect( + collection: Array|List|Dictionary, + whereValue: W): T; + + /** + * @see _.find + * @param _.matchesProperty style callback + **/ + find( + collection: Array|List|Dictionary, + path: string, + srcValue: any): T; + + /** + * Alias of _.find + * @see _.find + * @param _.matchesProperty style callback + **/ + detect( + collection: Array|List|Dictionary, + path: string, + srcValue: any): T; + + /** + * @see _.find + * @param _.property style callback + **/ + find( + collection: Array|List|Dictionary, + pluckValue: string): T; + + /** + * Alias of _.find + * @see _.find + * @param _.property style callback + **/ + detect( + collection: Array|List|Dictionary, + pluckValue: string): T; + + /** + * @see _.find + **/ + findWhere( + collection: Array, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + findWhere( + collection: List, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + findWhere( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T; + + /** + * @see _.find + * 
@param _.matches style callback + **/ + findWhere( + collection: Array, + whereValue: W): T; + + /** + * @see _.find + * @param _.matches style callback + **/ + findWhere( + collection: List, + whereValue: W): T; + + /** + * @see _.find + * @param _.matches style callback + **/ + findWhere( + collection: Dictionary, + whereValue: W): T; + + /** + * @see _.find + * @param _.property style callback + **/ + findWhere( + collection: Array, + pluckValue: string): T; + + /** + * @see _.find + * @param _.property style callback + **/ + findWhere( + collection: List, + pluckValue: string): T; + + /** + * @see _.find + * @param _.property style callback + **/ + findWhere( + collection: Dictionary, + pluckValue: string): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.find + */ + find( + callback: ListIterator, + thisArg?: any): T; + /** + * @see _.find + * @param _.matches style callback + */ + find( + whereValue: W): T; + /** + * @see _.find + * @param _.matchesProperty style callback + */ + find( + path: string, + srcValue: any): T; + /** + * @see _.find + * @param _.property style callback + */ + find( + pluckValue: string): T; + } + + //_.findLast + interface LoDashStatic { + /** + * This method is like _.find except that it iterates over elements of a collection from + * right to left. + * @param collection Searches for a value in this list. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return The found element, else undefined. + **/ + findLast( + collection: Array, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + findLast( + collection: List, + callback: ListIterator, + thisArg?: any): T; + + /** + * @see _.find + **/ + findLast( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T; + + /** + * @see _.find + * @param _.pluck style callback + **/ + findLast( + collection: Array, + whereValue: W): T; + + /** + * @see _.find + * @param _.pluck style callback + **/ + findLast( + collection: List, + whereValue: W): T; + + /** + * @see _.find + * @param _.pluck style callback + **/ + findLast( + collection: Dictionary, + whereValue: W): T; + + /** + * @see _.find + * @param _.where style callback + **/ + findLast( + collection: Array, + pluckValue: string): T; + + /** + * @see _.find + * @param _.where style callback + **/ + findLast( + collection: List, + pluckValue: string): T; + + /** + * @see _.find + * @param _.where style callback + **/ + findLast( + collection: Dictionary, + pluckValue: string): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.findLast + */ + findLast( + callback: ListIterator, + thisArg?: any): T; + /** + * @see _.findLast + * @param _.where style callback + */ + findLast( + whereValue: W): T; + + /** + * @see _.findLast + * @param _.where style callback + */ + findLast( + pluckValue: string): T; + } + + //_.forEach + interface LoDashStatic { + /** + * Iterates over elements of a collection, executing the callback for each element. + * The callback is bound to thisArg and invoked with three arguments; (value, index|key, + * collection). Callbacks may exit iteration early by explicitly returning false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. 
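+ *
+ * @example
+ * // illustrative usage:
+ * _.forEach([1, 2], function(n) { console.log(n); }); // logs 1, then 2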
+ **/ + forEach( + collection: Array, + callback: ListIterator, + thisArg?: any): Array; + + /** + * @see _.forEach + **/ + forEach( + collection: List, + callback: ListIterator, + thisArg?: any): List; + + /** + * @see _.forEach + **/ + forEach( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.each + **/ + forEach( + object: T, + callback: ObjectIterator, + thisArg?: any): T + + /** + * @see _.forEach + **/ + each( + collection: Array, + callback: ListIterator, + thisArg?: any): Array; + + /** + * @see _.forEach + **/ + each( + collection: List, + callback: ListIterator, + thisArg?: any): List; + + /** + * @see _.forEach + * @param object The object to iterate over + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + **/ + each( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.each + **/ + each( + object: T, + callback: ObjectIterator, + thisArg?: any): T + } + + interface LoDashArrayWrapper { + /** + * @see _.forEach + **/ + forEach( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.forEach + **/ + each( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.forEach + **/ + forEach( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper; + + /** + * @see _.forEach + **/ + each( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper; + } + + //_.forEachRight + interface LoDashStatic { + /** + * This method is like _.forEach except that it iterates over elements of a + * collection from right to left. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + **/ + forEachRight( + collection: Array, + callback: ListIterator, + thisArg?: any): Array; + + /** + * @see _.forEachRight + **/ + forEachRight( + collection: List, + callback: ListIterator, + thisArg?: any): List; + + /** + * @see _.forEachRight + **/ + forEachRight( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.forEachRight + **/ + eachRight( + collection: Array, + callback: ListIterator, + thisArg?: any): Array; + + /** + * @see _.forEachRight + **/ + eachRight( + collection: List, + callback: ListIterator, + thisArg?: any): List; + + /** + * @see _.forEachRight + * @param object The object to iterate over + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + **/ + eachRight( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): Dictionary; + } + + interface LoDashArrayWrapper { + /** + * @see _.forEachRight + **/ + forEachRight( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.forEachRight + **/ + eachRight( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.forEachRight + **/ + forEachRight( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper>; + + /** + * @see _.forEachRight + * @param object The object to iterate over + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. 
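+ *
+ * @example
+ * // illustrative usage:
+ * _.forEachRight([1, 2], function(n) { console.log(n); }); // logs 2, then 1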
+ **/ + eachRight( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper>; + } + + //_.groupBy + interface LoDashStatic { + /** + * Creates an object composed of keys generated from the results of running each element + * of a collection through the callback. The corresponding value of each key is an array + * of the elements responsible for generating the key. The callback is bound to thisArg + * and invoked with three arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return Returns the composed aggregate object. + **/ + groupBy( + collection: Array, + callback?: ListIterator, + thisArg?: any): Dictionary; + + /** + * @see _.groupBy + **/ + groupBy( + collection: List, + callback?: ListIterator, + thisArg?: any): Dictionary; + + /** + * @see _.groupBy + * @param pluckValue _.pluck style callback + **/ + groupBy( + collection: Array, + pluckValue: string): Dictionary; + + /** + * @see _.groupBy + * @param pluckValue _.pluck style callback + **/ + groupBy( + collection: List, + pluckValue: string): Dictionary; + + /** + * @see _.groupBy + * @param whereValue _.where style callback + **/ + groupBy( + collection: Array, + whereValue: W): Dictionary; + + /** + * @see _.groupBy + * @param whereValue _.where style callback + **/ + groupBy( + collection: List, + whereValue: W): Dictionary; + + /** + * @see _.groupBy + **/ + groupBy( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.groupBy + * @param pluckValue _.pluck style callback + **/ + groupBy( + collection: Dictionary, + pluckValue: string): Dictionary; + + /** + * @see _.groupBy + * @param whereValue _.where style callback + **/ + groupBy( + collection: Dictionary, + whereValue: W): Dictionary; + } + + interface LoDashArrayWrapper { + /** + * @see _.groupBy + **/ + groupBy( + callback: ListIterator, + thisArg?: any): _.LoDashObjectWrapper<_.Dictionary>; + + /** + * @see _.groupBy + **/ + groupBy( + pluckValue: string): _.LoDashObjectWrapper<_.Dictionary>; + + /** + * @see _.groupBy + **/ + groupBy( + whereValue: W): _.LoDashObjectWrapper<_.Dictionary>; + } + + interface LoDashObjectWrapper { + /** + * @see _.groupBy + **/ + groupBy( + callback: ListIterator, + thisArg?: any): _.LoDashObjectWrapper<_.Dictionary>; + + /** + * @see _.groupBy + **/ + groupBy( + pluckValue: string): _.LoDashObjectWrapper<_.Dictionary>; + + /** + * @see _.groupBy + **/ + groupBy( + whereValue: W): _.LoDashObjectWrapper<_.Dictionary>; + } + + //_.indexBy + interface LoDashStatic { + /** + * Creates an object composed of keys generated from the results of running each element + * of the collection through the given callback. The corresponding value of each key is + * the last element responsible for generating the key. The callback is bound to thisArg + * and invoked with three arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. 
+ * + * If an object is provided for callback the created "_.where" style callback will return + * true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return Returns the composed aggregate object. + **/ + indexBy( + list: Array, + iterator: ListIterator, + context?: any): Dictionary; + + /** + * @see _.indexBy + **/ + indexBy( + list: List, + iterator: ListIterator, + context?: any): Dictionary; + + /** + * @see _.indexBy + * @param pluckValue _.pluck style callback + **/ + indexBy( + collection: Array, + pluckValue: string): Dictionary; + + /** + * @see _.indexBy + * @param pluckValue _.pluck style callback + **/ + indexBy( + collection: List, + pluckValue: string): Dictionary; + + /** + * @see _.indexBy + * @param whereValue _.where style callback + **/ + indexBy( + collection: Array, + whereValue: W): Dictionary; + + /** + * @see _.indexBy + * @param whereValue _.where style callback + **/ + indexBy( + collection: List, + whereValue: W): Dictionary; + } + + //_.invoke + interface LoDashStatic { + /** + * Invokes the method named by methodName on each element in the collection returning + * an array of the results of each invoked method. Additional arguments will be provided + * to each invoked method. If methodName is a function it will be invoked for, and this + * bound to, each element in the collection. + * @param collection The collection to iterate over. + * @param methodName The name of the method to invoke. + * @param args Arguments to invoke the method with. + **/ + invoke( + collection: Array, + methodName: string, + ...args: any[]): any; + + /** + * @see _.invoke + **/ + invoke( + collection: List, + methodName: string, + ...args: any[]): any; + + /** + * @see _.invoke + **/ + invoke( + collection: Dictionary, + methodName: string, + ...args: any[]): any; + + /** + * @see _.invoke + **/ + invoke( + collection: Array, + method: Function, + ...args: any[]): any; + + /** + * @see _.invoke + **/ + invoke( + collection: List, + method: Function, + ...args: any[]): any; + + /** + * @see _.invoke + **/ + invoke( + collection: Dictionary, + method: Function, + ...args: any[]): any; + } + + //_.map + interface LoDashStatic { + /** + * Creates an array of values by running each element in the collection through the callback. + * The callback is bound to thisArg and invoked with three arguments; (value, index|key, + * collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will return + * the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return true + * for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param theArg The this binding of callback. + * @return The mapped array result. + **/ + map( + collection: Array, + callback: ListIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + **/ + map( + collection: List, + callback: ListIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + * @param object The object to iterate over. + * @param callback The function called per iteration. + * @param thisArg `this` object in `iterator`, optional. + * @return The mapped object result. 
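+ *
+ * @example
+ * // illustrative usage:
+ * _.map([1, 2, 3], function(n) { return n * 3; }); // => [3, 6, 9]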
+ **/ + map( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + * @param pluckValue _.pluck style callback + **/ + map( + collection: Array, + pluckValue: string): TResult[]; + + /** + * @see _.map + * @param pluckValue _.pluck style callback + **/ + map( + collection: List, + pluckValue: string): TResult[]; + + /** + * @see _.map + **/ + collect( + collection: Array, + callback: ListIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + **/ + collect( + collection: List, + callback: ListIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + **/ + collect( + object: Dictionary, + callback: DictionaryIterator, + thisArg?: any): TResult[]; + + /** + * @see _.map + **/ + collect( + collection: Array, + pluckValue: string): TResult[]; + + /** + * @see _.map + **/ + collect( + collection: List, + pluckValue: string): TResult[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.map + **/ + map( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.map + * @param pluckValue _.pluck style callback + **/ + map( + pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.map + **/ + collect( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.map + **/ + collect( + pluckValue: string): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.map + **/ + map( + callback: ObjectIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.map + **/ + collect( + callback: ObjectIterator, + thisArg?: any): LoDashArrayWrapper; + } + + //_.ceil + interface LoDashStatic { + /** + * Calculates n rounded up to precision. + * @param n The number to round up. + * @param precision The precision to round up to. + * @return Returns the rounded up number. + */ + ceil(n: number, precision?: number): number; + } + + interface LoDashWrapper { + /** + * @see _.ceil + */ + ceil(precision?: number): number; + } + + //_.floor + interface LoDashStatic { + /** + * Calculates n rounded down to precision. + * @param n The number to round down. + * @param precision The precision to round down to. + * @return Returns the rounded down number. + */ + floor(n: number, precision?: number): number; + } + + interface LoDashWrapper { + /** + * @see _.floor + */ + floor(precision?: number): number; + } + + //_.max + interface LoDashStatic { + /** + * Retrieves the maximum value of a collection. If the collection is empty or falsey -Infinity is + * returned. If a callback is provided it will be executed for each value in the collection to + * generate the criterion by which the value is ranked. The callback is bound to thisArg and invoked + * with three arguments; (value, index, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will return the + * property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return true for + * elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return Returns the maximum value. 
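+ *
+ * @example
+ * // illustrative usage:
+ * _.max([4, 2, 8, 6]); // => 8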
+ **/ + max( + collection: Array, + callback?: ListIterator, + thisArg?: any): T; + + /** + * @see _.max + **/ + max( + collection: List, + callback?: ListIterator, + thisArg?: any): T; + + /** + * @see _.max + **/ + max( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): T; + + /** + * @see _.max + * @param pluckValue _.pluck style callback + **/ + max( + collection: Array, + pluckValue: string): T; + + /** + * @see _.max + * @param pluckValue _.pluck style callback + **/ + max( + collection: List, + pluckValue: string): T; + + /** + * @see _.max + * @param pluckValue _.pluck style callback + **/ + max( + collection: Dictionary, + pluckValue: string): T; + + /** + * @see _.max + * @param whereValue _.where style callback + **/ + max( + collection: Array, + whereValue: W): T; + + /** + * @see _.max + * @param whereValue _.where style callback + **/ + max( + collection: List, + whereValue: W): T; + + /** + * @see _.max + * @param whereValue _.where style callback + **/ + max( + collection: Dictionary, + whereValue: W): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.max + **/ + max( + callback?: ListIterator, + thisArg?: any): LoDashWrapper; + + /** + * @see _.max + * @param pluckValue _.pluck style callback + **/ + max( + pluckValue: string): LoDashWrapper; + + /** + * @see _.max + * @param whereValue _.where style callback + **/ + max( + whereValue: W): LoDashWrapper; + } + + //_.min + interface LoDashStatic { + /** + * Retrieves the minimum value of a collection. If the collection is empty or falsey + * Infinity is returned. If a callback is provided it will be executed for each value + * in the collection to generate the criterion by which the value is ranked. The callback + * is bound to thisArg and invoked with three arguments; (value, index, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback + * will return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will + * return true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return Returns the maximum value. 
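+ *
+ * @example
+ * // illustrative usage:
+ * _.min([4, 2, 8, 6]); // => 2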
+ **/ + min( + collection: Array, + callback?: ListIterator, + thisArg?: any): T; + + /** + * @see _.min + **/ + min( + collection: List, + callback?: ListIterator, + thisArg?: any): T; + + /** + * @see _.min + **/ + min( + collection: Dictionary, + callback?: ListIterator, + thisArg?: any): T; + + /** + * @see _.min + * @param pluckValue _.pluck style callback + **/ + min( + collection: Array, + pluckValue: string): T; + + /** + * @see _.min + * @param pluckValue _.pluck style callback + **/ + min( + collection: List, + pluckValue: string): T; + + /** + * @see _.min + * @param pluckValue _.pluck style callback + **/ + min( + collection: Dictionary, + pluckValue: string): T; + + /** + * @see _.min + * @param whereValue _.where style callback + **/ + min( + collection: Array, + whereValue: W): T; + + /** + * @see _.min + * @param whereValue _.where style callback + **/ + min( + collection: List, + whereValue: W): T; + + /** + * @see _.min + * @param whereValue _.where style callback + **/ + min( + collection: Dictionary, + whereValue: W): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.min + **/ + min( + callback?: ListIterator, + thisArg?: any): LoDashWrapper; + + /** + * @see _.min + * @param pluckValue _.pluck style callback + **/ + min( + pluckValue: string): LoDashWrapper; + + /** + * @see _.min + * @param whereValue _.where style callback + **/ + min( + whereValue: W): LoDashWrapper; + } + + //_.round + interface LoDashStatic { + /** + * Calculates n rounded to precision. + * @param n The number to round. + * @param precision The precision to round to. + * @return Returns the rounded number. + */ + round(n: number, precision?: number): number; + } + + interface LoDashWrapper { + /** + * @see _.round + */ + round(precision?: number): number; + } + + //_.sum + interface LoDashStatic { + /** + * Gets the sum of the values in collection. + * + * @param collection The collection to iterate over. + * @param iteratee The function invoked per iteration. + * @param thisArg The this binding of iteratee. + * @return Returns the sum. + **/ + sum( + collection: Array): number; + + /** + * @see _.sum + **/ + sum( + collection: List): number; + + /** + * @see _.sum + **/ + sum( + collection: Dictionary): number; + + /** + * @see _.sum + **/ + sum( + collection: Array, + iteratee: ListIterator, + thisArg?: any): number; + + /** + * @see _.sum + **/ + sum( + collection: List, + iteratee: ListIterator, + thisArg?: any): number; + + /** + * @see _.sum + **/ + sum( + collection: Dictionary, + iteratee: ObjectIterator, + thisArg?: any): number; + + /** + * @see _.sum + * @param property _.property callback shorthand. + **/ + sum( + collection: Array, + property: string): number; + + /** + * @see _.sum + * @param property _.property callback shorthand. + **/ + sum( + collection: List, + property: string): number; + + /** + * @see _.sum + * @param property _.property callback shorthand. + **/ + sum( + collection: Dictionary, + property: string): number; + } + + interface LoDashNumberArrayWrapper { + /** + * @see _.sum + **/ + sum(): number; + + /** + * @see _.sum + **/ + sum( + iteratee: ListIterator, + thisArg?: any): number; + } + + interface LoDashArrayWrapper { + /** + * @see _.sum + **/ + sum(): number; + + /** + * @see _.sum + **/ + sum( + iteratee: ListIterator, + thisArg?: any): number; + + /** + * @see _.sum + * @param property _.property callback shorthand. 
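+ *
+ * @example
+ * // illustrative usage:
+ * _([4, 6]).sum(); // => 10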
+ **/ + sum( + property: string): number; + } + + interface LoDashObjectWrapper { + /** + * @see _.sum + **/ + sum(): number; + + /** + * @see _.sum + **/ + sum( + iteratee: ObjectIterator, + thisArg?: any): number; + + /** + * @see _.sum + * @param property _.property callback shorthand. + **/ + sum( + property: string): number; + } + + //_.pluck + interface LoDashStatic { + /** + * Retrieves the value of a specified property from all elements in the collection. + * @param collection The collection to iterate over. + * @param property The property to pluck. + * @return A new array of property values. + **/ + pluck( + collection: Array, + property: string|string[]): any[]; + + /** + * @see _.pluck + **/ + pluck( + collection: List, + property: string|string[]): any[]; + + /** + * @see _.pluck + **/ + pluck( + collection: Dictionary, + property: string|string[]): any[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.pluck + **/ + pluck( + property: string): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.pluck + **/ + pluck( + property: string): LoDashArrayWrapper; + } + + //_.partition + interface LoDashStatic { + /** + * Creates an array of elements split into two groups, the first of which contains elements predicate returns truthy for, + * while the second of which contains elements predicate returns falsey for. + * The predicate is bound to thisArg and invoked with three arguments: (value, index|key, collection). + * + * If a property name is provided for predicate the created _.property style callback + * returns the property value of the given element. + * + * If a value is also provided for thisArg the created _.matchesProperty style callback + * returns true for elements that have a matching property value, else false. + * + * If an object is provided for predicate the created _.matches style callback returns + * true for elements that have the properties of the given object, else false. + * + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of predicate. + * @return Returns the array of grouped elements. 
+ **/ + partition( + collection: List, + callback: ListIterator, + thisArg?: any): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: List, + whereValue: W): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: Dictionary, + whereValue: W): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: List, + path: string, + srcValue: any): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: Dictionary, + path: string, + srcValue: any): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: List, + pluckValue: string): T[][]; + + /** + * @see _.partition + **/ + partition( + collection: Dictionary, + pluckValue: string): T[][]; + } + + interface LoDashStringWrapper { + /** + * @see _.partition + */ + partition( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + } + + interface LoDashArrayWrapper { + /** + * @see _.partition + */ + partition( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + /** + * @see _.partition + */ + partition( + whereValue: W): LoDashArrayWrapper; + /** + * @see _.partition + */ + partition( + path: string, + srcValue: any): LoDashArrayWrapper; + /** + * @see _.partition + */ + partition( + pluckValue: string): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.partition + */ + partition( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.partition + */ + partition( + callback: DictionaryIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.partition + */ + partition( + whereValue: W): LoDashArrayWrapper; + + /** + * @see _.partition + */ + partition( + path: string, + srcValue: any): LoDashArrayWrapper; + + /** + * @see _.partition + */ + partition( + pluckValue: string): LoDashArrayWrapper; + } + + //_.reduce + interface LoDashStatic { + /** + * Reduces a collection to a value which is the accumulated result of running each + * element in the collection through the callback, where each successive callback execution + * consumes the return value of the previous execution. If accumulator is not provided the + * first element of the collection will be used as the initial accumulator value. The callback + * is bound to thisArg and invoked with four arguments; (accumulator, value, index|key, collection). + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param accumulator Initial value of the accumulator. + * @param thisArg The this binding of callback. + * @return Returns the accumulated value. 
+ **/ + reduce( + collection: Array, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + collection: List, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + collection: Dictionary, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + collection: Array, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + collection: List, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + collection: Dictionary, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: Array, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: List, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: Dictionary, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: Array, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: List, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + collection: Dictionary, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: Array, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: List, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: Dictionary, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: Array, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: List, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + collection: Dictionary, + callback: MemoIterator, + thisArg?: any): TResult; + } + + interface LoDashArrayWrapper { + /** + * @see _.reduce + **/ + reduce( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + callback: MemoIterator, + thisArg?: any): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.reduce + **/ + reduce( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + reduce( + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + inject( + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduce + **/ + foldl( + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): 
TResult; + + /** + * @see _.reduce + **/ + foldl( + callback: MemoIterator, + thisArg?: any): TResult; + } + + //_.reduceRight + interface LoDashStatic { + /** + * This method is like _.reduce except that it iterates over elements of a collection from + * right to left. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param accumulator Initial value of the accumulator. + * @param thisArg The this binding of callback. + * @return The accumulated value. + **/ + reduceRight( + collection: Array, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + reduceRight( + collection: List, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + reduceRight( + collection: Dictionary, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + reduceRight( + collection: Array, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + reduceRight( + collection: List, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + reduceRight( + collection: Dictionary, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: Array, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: List, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: Dictionary, + callback: MemoIterator, + accumulator: TResult, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: Array, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: List, + callback: MemoIterator, + thisArg?: any): TResult; + + /** + * @see _.reduceRight + **/ + foldr( + collection: Dictionary, + callback: MemoIterator, + thisArg?: any): TResult; + } + + //_.reject + interface LoDashStatic { + /** + * The opposite of _.filter this method returns the elements of a collection that + * the callback does not return truey for. + * + * If a property name is provided for callback the created "_.pluck" style callback + * will return the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will + * return true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return A new array of elements that failed the callback check. 
+ **/ + reject( + collection: Array, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.reject + **/ + reject( + collection: List, + callback: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.reject + **/ + reject( + collection: Dictionary, + callback: DictionaryIterator, + thisArg?: any): T[]; + + /** + * @see _.reject + * @param pluckValue _.pluck style callback + **/ + reject( + collection: Array, + pluckValue: string): T[]; + + /** + * @see _.reject + * @param pluckValue _.pluck style callback + **/ + reject( + collection: List, + pluckValue: string): T[]; + + /** + * @see _.reject + * @param pluckValue _.pluck style callback + **/ + reject( + collection: Dictionary, + pluckValue: string): T[]; + + /** + * @see _.reject + * @param whereValue _.where style callback + **/ + reject( + collection: Array, + whereValue: W): T[]; + + /** + * @see _.reject + * @param whereValue _.where style callback + **/ + reject( + collection: List, + whereValue: W): T[]; + + /** + * @see _.reject + * @param whereValue _.where style callback + **/ + reject( + collection: Dictionary, + whereValue: W): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.reject + **/ + reject( + callback: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.reject + * @param pluckValue _.pluck style callback + **/ + reject(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.reject + * @param whereValue _.where style callback + **/ + reject(whereValue: W): LoDashArrayWrapper; + } + + //_.sample + interface LoDashStatic { + /** + * Retrieves a random element or n random elements from a collection. + * @param collection The collection to sample. + * @return Returns the random sample(s) of collection. + **/ + sample(collection: Array): T; + + /** + * @see _.sample + **/ + sample(collection: List): T; + + /** + * @see _.sample + **/ + sample(collection: Dictionary): T; + + /** + * @see _.sample + * @param n The number of elements to sample. + **/ + sample(collection: Array, n: number): T[]; + + /** + * @see _.sample + * @param n The number of elements to sample. + **/ + sample(collection: List, n: number): T[]; + + /** + * @see _.sample + * @param n The number of elements to sample. + **/ + sample(collection: Dictionary, n: number): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.sample + **/ + sample(n: number): LoDashArrayWrapper; + + /** + * @see _.sample + **/ + sample(): LoDashWrapper; + } + + //_.shuffle + interface LoDashStatic { + /** + * Creates an array of shuffled values, using a version of the Fisher-Yates shuffle. + * See http://en.wikipedia.org/wiki/Fisher-Yates_shuffle. + * @param collection The collection to shuffle. + * @return Returns a new shuffled collection. + **/ + shuffle(collection: Array): T[]; + + /** + * @see _.shuffle + **/ + shuffle(collection: List): T[]; + + /** + * @see _.shuffle + **/ + shuffle(collection: Dictionary): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.shuffle + **/ + shuffle(): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.shuffle + **/ + shuffle(): LoDashArrayWrapper; + } + + //_.size + interface LoDashStatic { + /** + * Gets the size of the collection by returning collection.length for arrays and array-like + * objects or the number of own enumerable properties for objects. + * @param collection The collection to inspect. 
+ * @return collection.length + **/ + size(collection: Array): number; + + /** + * @see _.size + **/ + size(collection: List): number; + + /** + * @see _.size + * @param object The object to inspect + * @return The number of own enumerable properties. + **/ + size(object: T): number; + + /** + * @see _.size + * @param aString The string to inspect + * @return The length of aString + **/ + size(aString: string): number; + } + + interface LoDashArrayWrapper { + /** + * @see _.size + **/ + size(): number; + } + + interface LoDashObjectWrapper { + /** + * @see _.size + **/ + size(): number; + } + + //_.some + interface LoDashStatic { + /** + * Checks if the callback returns a truey value for any element of a collection. The function + * returns as soon as it finds a passing value and does not iterate over the entire collection. + * The callback is bound to thisArg and invoked with three arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will return + * the property value of the given element. + * + * If an object is provided for callback the created "_.where" style callback will return true for + * elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return True if any element passed the callback check, else false. + **/ + some( + collection: Array, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + some( + collection: List, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + some( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + some( + collection: {}, + callback?: ListIterator<{}, boolean>, + thisArg?: any): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + **/ + some( + collection: Array, + pluckValue: string): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + **/ + some( + collection: List, + pluckValue: string): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + **/ + some( + collection: Dictionary, + pluckValue: string): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + some( + collection: Array, + whereValue: W): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + some( + collection: List, + whereValue: W): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + some( + collection: Dictionary, + whereValue: W): boolean; + + /** + * @see _.some + **/ + any( + collection: Array, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + any( + collection: List, + callback?: ListIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + any( + collection: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): boolean; + + /** + * @see _.some + **/ + any( + collection: {}, + callback?: ListIterator<{}, boolean>, + thisArg?: any): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + **/ + any( + collection: Array, + pluckValue: string): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + **/ + any( + collection: List, + pluckValue: string): boolean; + + /** + * @see _.some + * @param pluckValue _.pluck style callback + 
**/ + any( + collection: Dictionary, + pluckValue: string): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + any( + collection: Array, + whereValue: W): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + any( + collection: List, + whereValue: W): boolean; + + /** + * @see _.some + * @param whereValue _.where style callback + **/ + any( + collection: Dictionary, + whereValue: W): boolean; + } + + //_.sortBy + interface LoDashStatic { + /** + * Creates an array of elements, sorted in ascending order by the results of running each + * element in a collection through the callback. This method performs a stable sort, that + * is, it will preserve the original sort order of equal elements. The callback is bound + * to thisArg and invoked with three arguments; (value, index|key, collection). + * + * If a property name is provided for callback the created "_.pluck" style callback will + * return the property value of the given element. + * + * If a value is also provided for thisArg the created "_.matchesProperty" style callback + * returns true for elements that have a matching property value, else false. + * + * If an object is provided for an iteratee the created "_.matches" style callback returns + * true for elements that have the properties of the given object, else false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return A new array of sorted elements. + **/ + sortBy( + collection: Array, + iteratee?: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.sortBy + **/ + sortBy( + collection: List, + iteratee?: ListIterator, + thisArg?: any): T[]; + + /** + * @see _.sortBy + * @param pluckValue _.pluck style callback + **/ + sortBy( + collection: Array, + pluckValue: string): T[]; + + /** + * @see _.sortBy + * @param pluckValue _.pluck style callback + **/ + sortBy( + collection: List, + pluckValue: string): T[]; + + /** + * @see _.sortBy + * @param whereValue _.where style callback + **/ + sortBy( + collection: Array, + whereValue: W): T[]; + + /** + * @see _.sortBy + * @param whereValue _.where style callback + **/ + sortBy( + collection: List, + whereValue: W): T[]; + + /** + * Sorts by all the given arguments, using either ListIterator, pluckValue, or whereValue foramts + * @param args The rules by which to sort + */ + sortByAll( + collection: (Array|List), + ...args: (ListIterator|Object|string)[] + ): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.sortBy + **/ + sortBy( + iteratee?: ListIterator, + thisArg?: any): LoDashArrayWrapper; + + /** + * @see _.sortBy + * @param pluckValue _.pluck style callback + **/ + sortBy(pluckValue: string): LoDashArrayWrapper; + + /** + * @see _.sortBy + * @param whereValue _.where style callback + **/ + sortBy(whereValue: W): LoDashArrayWrapper; + + /** + * Sorts by all the given arguments, using either ListIterator, pluckValue, or whereValue foramts + * @param args The rules by which to sort + */ + sortByAll(...args: (ListIterator|Object|string)[]): LoDashArrayWrapper; + } + + //_.sortByAll + interface LoDashStatic { + /** + * This method is like "_.sortBy" except that it can sort by multiple iteratees or + * property names. + * + * If a property name is provided for an iteratee the created "_.property" style callback + * returns the property value of the given element. 
+ * + * If a value is also provided for thisArg the created "_.matchesProperty" style callback + * returns true for elements that have a matching property value, else false. + * + * If an object is provided for an iteratee the created "_.matches" style callback returns + * true for elements that have the properties of the given object, else false. + * + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return A new array of sorted elements. + **/ + sortByAll( + collection: Array, + iteratees: (ListIterator|string|Object)[]): T[]; + + /** + * @see _.sortByAll + **/ + sortByAll( + collection: List, + iteratees: (ListIterator|string|Object)[]): T[]; + + /** + * @see _.sortByAll + **/ + sortByAll( + collection: Array, + ...iteratees: (ListIterator|string|Object)[]): T[]; + + /** + * @see _.sortByAll + **/ + sortByAll( + collection: List, + ...iteratees: (ListIterator|string|Object)[]): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.sortByAll + **/ + sortByAll( + iteratees: (ListIterator|string|Object)[]): LoDashArrayWrapper; + + /** + * @see _.sortByAll + **/ + sortByAll( + ...iteratees: (ListIterator|string|Object)[]): LoDashArrayWrapper; + } + + //_.sortByOrder + interface LoDashStatic { + /** + * This method is like "_.sortByAll" except that it allows specifying the sort orders of the + * iteratees to sort by. If orders is unspecified, all values are sorted in ascending order. + * Otherwise, a value is sorted in ascending order if its corresponding order is "asc", and + * descending if "desc". + * + * If a property name is provided for an iteratee the created "_.property" style callback + * returns the property value of the given element. + * + * If an object is provided for an iteratee the created "_.matches" style callback returns + * true for elements that have the properties of the given object, else false. + * + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return A new array of sorted elements. + **/ + sortByOrder( + collection: Array, + iteratees: (ListIterator|string|Object)[], + orders?: boolean[]): T[]; + + /** + * @see _.sortByOrder + **/ + sortByOrder( + collection: List, + iteratees: (ListIterator|string|Object)[], + orders?: boolean[]): T[]; + + /** + * @see _.sortByOrder + **/ + sortByOrder( + collection: Array, + iteratees: (ListIterator|string|Object)[], + orders?: string[]): T[]; + + /** + * @see _.sortByOrder + **/ + sortByOrder( + collection: List, + iteratees: (ListIterator|string|Object)[], + orders?: string[]): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.sortByOrder + **/ + sortByOrder( + iteratees: (ListIterator|string|Object)[], + orders?: boolean[]): LoDashArrayWrapper; + + /** + * @see _.sortByOrder + **/ + sortByOrder( + iteratees: (ListIterator|string|Object)[], + orders?: string[]): LoDashArrayWrapper; + } + + //_.toArray + interface LoDashStatic { + /** + * Converts the collection to an array. + * @param collection The collection to convert. + * @return The new converted array. 
+ **/ + toArray(collection: Array): T[]; + + /** + * @see _.toArray + **/ + toArray(collection: List): T[]; + + /** + * @see _.toArray + **/ + toArray(collection: Dictionary): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.toArray + **/ + toArray(): LoDashArrayWrapper; + } + + interface LoDashObjectWrapper { + /** + * @see _.toArray + **/ + toArray(): LoDashArrayWrapper; + } + + //_.where + interface LoDashStatic { + /** + * Performs a deep comparison of each element in a collection to the given properties + * object, returning an array of all elements that have equivalent property values. + * @param collection The collection to iterate over. + * @param properties The object of property values to filter by. + * @return A new array of elements that have the given properties. + **/ + where( + list: Array, + properties: U): T[]; + + /** + * @see _.where + **/ + where( + list: List, + properties: U): T[]; + + /** + * @see _.where + **/ + where( + list: Dictionary, + properties: U): T[]; + } + + interface LoDashArrayWrapper { + /** + * @see _.where + **/ + where(properties: U): LoDashArrayWrapper; + } + + /******** + * Date * + ********/ + + //_.now + interface LoDashStatic { + /** + * Gets the number of milliseconds that have elapsed since the Unix epoch + * (1 January 1970 00:00:00 UTC). + * @return The number of milliseconds. + **/ + now(): number; + } + + /************* + * Functions * + *************/ + + //_.after + interface LoDashStatic { + /** + * Creates a function that executes func, with the this binding and arguments of the + * created function, only after being called n times. + * @param n The number of times the function must be called before func is executed. + * @param func The function to restrict. + * @return The new restricted function. + **/ + after( + n: number, + func: Function): Function; + } + + interface LoDashWrapper { + /** + * @see _.after + **/ + after(func: Function): LoDashObjectWrapper; + } + + //_.ary + interface LoDashStatic { + /** + * Creates a function that accepts up to n arguments ignoring any additional arguments. + * @param func The function to cap arguments for. + * @param n The arity cap. + * @param guard Enables use as a callback for functions like `_.map`. + * @returns Returns the new function. + */ + ary(func: Function, n?: number, guard?: Object): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.ary + */ + ary(n?: number, guard?: Object): LoDashObjectWrapper; + } + + //_.backflow + interface LoDashStatic { + /** + * @see _.flowRight + */ + backflow(...funcs: Function[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.flowRight + **/ + backflow(...funcs: Function[]): LoDashObjectWrapper; + } + + //_.before + interface LoDashStatic { + /** + * Creates a function that invokes func, with the this binding and arguments of the created function, while + * it is called less than n times. Subsequent calls to the created function return the result of the last func + * invocation. + * @param n The number of calls at which func is no longer invoked. + * @param func The function to restrict. + * @return Returns the new restricted function. + */ + before(n: number, func: TFunc): TFunc; + } + + interface LoDashWrapper { + /** + * @sed _.before + */ + before(func: TFunc): TFunc; + } + + //_.bind + interface LoDashStatic { + /** + * Creates a function that, when called, invokes func with the this binding of thisArg + * and prepends any additional bind arguments to those provided to the bound function. 
+ * @param func The function to bind. + * @param thisArg The this binding of func. + * @param args Arguments to be partially applied. + * @return The new bound function. + **/ + bind( + func: Function, + thisArg: any, + ...args: any[]): (...args: any[]) => any; + } + + interface LoDashObjectWrapper { + /** + * @see _.bind + **/ + bind( + thisArg: any, + ...args: any[]): LoDashObjectWrapper<(...args: any[]) => any>; + } + + //_.bindAll + interface LoDashStatic { + /** + * Binds methods of an object to the object itself, overwriting the existing method. Method + * names may be specified as individual arguments or as arrays of method names. If no method + * names are provided all the function properties of object will be bound. + * @param object The object to bind and assign the bound methods to. + * @param methodNames The object method names to bind, specified as individual method names + * or arrays of method names. + * @return object + **/ + bindAll( + object: T, + ...methodNames: string[]): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.bindAll + **/ + bindAll(...methodNames: string[]): LoDashWrapper; + } + + //_.bindKey + interface LoDashStatic { + /** + * Creates a function that, when called, invokes the method at object[key] and prepends any + * additional bindKey arguments to those provided to the bound function. This method differs + * from _.bind by allowing bound functions to reference methods that will be redefined or don't + * yet exist. See http://michaux.ca/articles/lazy-function-definition-pattern. + * @param object The object the method belongs to. + * @param key The key of the method. + * @param args Arguments to be partially applied. + * @return The new bound function. + **/ + bindKey( + object: T, + key: string, + ...args: any[]): Function; + } + + interface LoDashObjectWrapper { + /** + * @see _.bindKey + **/ + bindKey( + key: string, + ...args: any[]): LoDashObjectWrapper; + } + + //_.compose + interface LoDashStatic { + /** + * @see _.flowRight + */ + compose(...funcs: Function[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.flowRight + */ + compose(...funcs: Function[]): LoDashObjectWrapper; + } + + //_.createCallback + interface LoDashStatic { + /** + * Produces a callback bound to an optional thisArg. If func is a property name the created + * callback will return the property value for a given element. If func is an object the created + * callback will return true for elements that contain the equivalent object properties, + * otherwise it will return false. + * @param func The value to convert to a callback. + * @param thisArg The this binding of the created callback. + * @param argCount The number of arguments the callback accepts. + * @return A callback function. 
+ **/ + createCallback( + func: string, + thisArg?: any, + argCount?: number): () => any; + + /** + * @see _.createCallback + **/ + createCallback( + func: Dictionary, + thisArg?: any, + argCount?: number): () => boolean; + } + + interface LoDashWrapper { + /** + * @see _.createCallback + **/ + createCallback( + thisArg?: any, + argCount?: number): LoDashObjectWrapper<() => any>; + } + + interface LoDashObjectWrapper { + /** + * @see _.createCallback + **/ + createCallback( + thisArg?: any, + argCount?: number): LoDashObjectWrapper<() => any>; + } + + //_.curry + interface LoDashStatic { + /** + * Creates a function that accepts one or more arguments of func that when called either invokes func returning + * its result, if all func arguments have been provided, or returns a function that accepts one or more of the + * remaining func arguments, and so on. The arity of func may be specified if func.length is not sufficient. + * @param func The function to curry. + * @param arity The arity of func. + * @return Returns the new curried function. + */ + curry( + func: Function, + arity?: number): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.curry + **/ + curry(arity?: number): LoDashObjectWrapper; + } + + //_.curryRight + interface LoDashStatic { + /** + * This method is like _.curry except that arguments are applied to func in the manner of _.partialRight + * instead of _.partial. + * @param func The function to curry. + * @param arity The arity of func. + * @return Returns the new curried function. + */ + curryRight( + func: Function, + arity?: number): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.curryRight + **/ + curryRight(arity?: number): LoDashObjectWrapper; + } + + //_.debounce + interface LoDashStatic { + /** + * Creates a function that will delay the execution of func until after wait milliseconds have + * elapsed since the last time it was invoked. Provide an options object to indicate that func + * should be invoked on the leading and/or trailing edge of the wait timeout. Subsequent calls + * to the debounced function will return the result of the last func call. + * + * Note: If leading and trailing options are true func will be called on the trailing edge of + * the timeout only if the the debounced function is invoked more than once during the wait + * timeout. + * @param func The function to debounce. + * @param wait The number of milliseconds to delay. + * @param options The options object. + * @param options.leading Specify execution on the leading edge of the timeout. + * @param options.maxWait The maximum time func is allowed to be delayed before it's called. + * @param options.trailing Specify execution on the trailing edge of the timeout. + * @return The new debounced function. + **/ + debounce( + func: T, + wait: number, + options?: DebounceSettings): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.debounce + **/ + debounce( + wait: number, + options?: DebounceSettings): LoDashObjectWrapper; + } + + interface DebounceSettings { + /** + * Specify execution on the leading edge of the timeout. + **/ + leading?: boolean; + + /** + * The maximum time func is allowed to be delayed before it's called. + **/ + maxWait?: number; + + /** + * Specify execution on the trailing edge of the timeout. + **/ + trailing?: boolean; + } + + //_.defer + interface LoDashStatic { + /** + * Defers executing the func function until the current call stack has cleared. Additional + * arguments will be provided to func when it is invoked. 
+ * @param func The function to defer. + * @param args Arguments to invoke the function with. + * @return The timer id. + **/ + defer( + func: Function, + ...args: any[]): number; + } + + interface LoDashObjectWrapper { + /** + * @see _.defer + **/ + defer(...args: any[]): LoDashWrapper; + } + + //_.delay + interface LoDashStatic { + /** + * Executes the func function after wait milliseconds. Additional arguments will be provided + * to func when it is invoked. + * @param func The function to delay. + * @param wait The number of milliseconds to delay execution. + * @param args Arguments to invoke the function with. + * @return The timer id. + **/ + delay( + func: Function, + wait: number, + ...args: any[]): number; + } + + interface LoDashObjectWrapper { + /** + * @see _.delay + **/ + delay( + wait: number, + ...args: any[]): LoDashWrapper; + } + + //_.flow + interface LoDashStatic { + /** + * Creates a function that returns the result of invoking the provided functions with the this binding of the + * created function, where each successive invocation is supplied the return value of the previous. + * @param funcs Functions to invoke. + * @return Returns the new function. + */ + flow(...funcs: Function[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.flow + **/ + flow(...funcs: Function[]): LoDashObjectWrapper; + } + + //_.flowRight + interface LoDashStatic { + /** + * This method is like _.flow except that it creates a function that invokes the provided functions from right + * to left. + * @param funcs Functions to invoke. + * @return Returns the new function. + */ + flowRight(...funcs: Function[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.flowRight + **/ + flowRight(...funcs: Function[]): LoDashObjectWrapper; + } + + //_.memoize + interface MemoizedFunction extends Function { + cache: MapCache; + } + + interface LoDashStatic { + /** + * Creates a function that memoizes the result of func. If resolver is provided it determines the cache key for + * storing the result based on the arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is coerced to a string and used as the cache key. The func is invoked with + * the this binding of the memoized function. + * @param func The function to have its output memoized. + * @param resolver The function to resolve the cache key. + * @return Returns the new memoizing function. + */ + memoize( + func: Function, + resolver?: Function): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.memoize + */ + memoize(resolver?: Function): LoDashObjectWrapper; + } + + //_.modArgs + interface LoDashStatic { + /** + * Creates a function that runs each argument through a corresponding transform function. + * @param func The function to wrap. + * @param transforms The functions to transform arguments, specified as individual functions or arrays + * of functions. + * @return Returns the new function. + */ + modArgs( + func: T, + ...transforms: Function[] + ): TResult; + + /** + * @see _.modArgs + */ + modArgs( + func: T, + transforms: Function[] + ): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.modArgs + */ + modArgs(...transforms: Function[]): LoDashObjectWrapper; + + /** + * @see _.modArgs + */ + modArgs(transforms: Function[]): LoDashObjectWrapper; + } + + //_.negate + interface LoDashStatic { + /** + * Creates a function that negates the result of the predicate func. 
The func predicate is invoked with + * the this binding and arguments of the created function. + * @param predicate The predicate to negate. + * @return Returns the new function. + */ + negate(predicate: T): (...args: any[]) => boolean; + + /** + * @see _.negate + */ + negate(predicate: T): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.negate + */ + negate(): LoDashObjectWrapper<(...args: any[]) => boolean>; + + /** + * @see _.negate + */ + negate(): LoDashObjectWrapper; + } + + //_.once + interface LoDashStatic { + /** + * Creates a function that is restricted to invoking func once. Repeat calls to the function return the value + * of the first call. The func is invoked with the this binding and arguments of the created function. + * @param func The function to restrict. + * @return Returns the new restricted function. + */ + + once(func: T): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.once + */ + once(): LoDashObjectWrapper; + } + + //_.partial + interface LoDashStatic { + /** + * Creates a function that, when called, invokes func with any additional partial arguments + * prepended to those provided to the new function. This method is similar to _.bind except + * it does not alter the this binding. + * @param func The function to partially apply arguments to. + * @param args Arguments to be partially applied. + * @return The new partially applied function. + **/ + partial( + func: Function, + ...args: any[]): Function; + } + + //_.partialRight + interface LoDashStatic { + /** + * This method is like _.partial except that partial arguments are appended to those provided + * to the new function. + * @param func The function to partially apply arguments to. + * @param args Arguments to be partially applied. + * @return The new partially applied function. + **/ + partialRight( + func: Function, + ...args: any[]): Function; + } + + //_.rearg + interface LoDashStatic { + /** + * Creates a function that invokes func with arguments arranged according to the specified indexes where the + * argument value at the first index is provided as the first argument, the argument value at the second index + * is provided as the second argument, and so on. + * @param func The function to rearrange arguments for. + * @param indexes The arranged argument indexes, specified as individual indexes or arrays of indexes. + * @return Returns the new function. + */ + rearg(func: Function, indexes: number[]): TResult; + + /** + * @see _.rearg + */ + rearg(func: Function, ...indexes: number[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.rearg + */ + rearg(indexes: number[]): LoDashObjectWrapper; + + /** + * @see _.rearg + */ + rearg(...indexes: number[]): LoDashObjectWrapper; + } + + //_.restParam + interface LoDashStatic { + /** + * Creates a function that invokes func with the this binding of the created function and arguments from start + * and beyond provided as an array. + * @param func The function to apply a rest parameter to. + * @param start The start position of the rest parameter. + * @return Returns the new function. 
+ */ + restParam(func: Function, start?: number): TResult; + + /** + * @see _.restParam + */ + restParam(func: TFunc, start?: number): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.restParam + */ + restParam(start?: number): LoDashObjectWrapper; + } + + //_.spread + interface LoDashStatic { + /** + * Creates a function that invokes func with the this binding of the created function and an array of arguments + * much like Function#apply. + * @param func The function to spread arguments over. + * @return Returns the new function. + */ + spread(func: Function): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.spread + */ + spread(): LoDashObjectWrapper; + } + + + //_.throttle + interface LoDashStatic { + /** + * Creates a function that, when executed, will only call the func function at most once per + * every wait milliseconds. Provide an options object to indicate that func should be invoked + * on the leading and/or trailing edge of the wait timeout. Subsequent calls to the throttled + * function will return the result of the last func call. + * + * Note: If leading and trailing options are true func will be called on the trailing edge of + * the timeout only if the the throttled function is invoked more than once during the wait timeout. + * @param func The function to throttle. + * @param wait The number of milliseconds to throttle executions to. + * @param options The options object. + * @param options.leading Specify execution on the leading edge of the timeout. + * @param options.trailing Specify execution on the trailing edge of the timeout. + * @return The new throttled function. + **/ + throttle( + func: T, + wait: number, + options?: ThrottleSettings): T; + } + + interface ThrottleSettings { + + /** + * If you'd like to disable the leading-edge call, pass this as false. + **/ + leading?: boolean; + + /** + * If you'd like to disable the execution on the trailing-edge, pass false. + **/ + trailing?: boolean; + } + + //_.wrap + interface LoDashStatic { + /** + * Creates a function that provides value to the wrapper function as its first argument. + * Additional arguments provided to the function are appended to those provided to the + * wrapper function. The wrapper is executed with the this binding of the created function. + * @param value The value to wrap. + * @param wrapper The wrapper function. + * @return The new function. + **/ + wrap( + value: any, + wrapper: (func: Function, ...args: any[]) => any): Function; + } + + /******** + * Lang * + ********/ + + //_.clone + interface LoDashStatic { + /** + * Creates a clone of value. If isDeep is true nested objects are cloned, otherwise they are assigned by + * reference. If customizer is provided it’s invoked to produce the cloned values. If customizer returns + * undefined cloning is handled by the method instead. The customizer is bound to thisArg and invoked with up + * to three argument; (value [, index|key, object]). + * Note: This method is loosely based on the structured clone algorithm. The enumerable properties of arguments + * objects and objects created by constructors other than Object are cloned to plain Object objects. An empty + * object is returned for uncloneable values such as functions, DOM nodes, Maps, Sets, and WeakMaps. + * @param value The value to clone. + * @param isDeep Specify a deep clone. + * @param customizer The function to customize cloning values. + * @param thisArg The this binding of customizer. + * @return Returns the cloned value. 
+ */ + clone( + value: T, + isDeep?: boolean, + customizer?: (value: any) => any, + thisArg?: any): T; + + /** + * @see _.clone + */ + clone( + value: T, + customizer?: (value: any) => any, + thisArg?: any): T; + } + + interface LoDashWrapper { + /** + * @see _.clone + */ + clone( + isDeep?: boolean, + customizer?: (value: any) => any, + thisArg?: any): T; + + /** + * @see _.clone + */ + clone( + customizer?: (value: any) => any, + thisArg?: any): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.clone + */ + clone( + isDeep?: boolean, + customizer?: (value: any) => any, + thisArg?: any): T[]; + + /** + * @see _.clone + */ + clone( + customizer?: (value: any) => any, + thisArg?: any): T[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.clone + */ + clone( + isDeep?: boolean, + customizer?: (value: any) => any, + thisArg?: any): T; + + /** + * @see _.clone + */ + clone( + customizer?: (value: any) => any, + thisArg?: any): T; + } + + //_.cloneDeep + interface LoDashStatic { + /** + * Creates a deep clone of value. If customizer is provided it’s invoked to produce the cloned values. If + * customizer returns undefined cloning is handled by the method instead. The customizer is bound to thisArg + * and invoked with up to three argument; (value [, index|key, object]). + * Note: This method is loosely based on the structured clone algorithm. The enumerable properties of arguments + * objects and objects created by constructors other than Object are cloned to plain Object objects. An empty + * object is returned for uncloneable values such as functions, DOM nodes, Maps, Sets, and WeakMaps. + * @param value The value to deep clone. + * @param customizer The function to customize cloning values. + * @param thisArg The this binding of customizer. + * @return Returns the deep cloned value. + */ + cloneDeep( + value: T, + customizer?: (value: any) => any, + thisArg?: any): T; + } + + interface LoDashWrapper { + /** + * @see _.cloneDeep + */ + cloneDeep( + customizer?: (value: any) => any, + thisArg?: any): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.cloneDeep + */ + cloneDeep( + customizer?: (value: any) => any, + thisArg?: any): T[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.cloneDeep + */ + cloneDeep( + customizer?: (value: any) => any, + thisArg?: any): T; + } + + //_.gt + interface LoDashStatic { + /** + * Checks if value is greater than other. + * @param value The value to compare. + * @param other The other value to compare. + * @return Returns true if value is greater than other, else false. + */ + gt(value: any, other: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.gt + */ + gt(other: any): boolean; + } + + //_.gte + interface LoDashStatic { + /** + * Checks if value is greater than or equal to other. + * @param value The value to compare. + * @param other The other value to compare. + * @return Returns true if value is greater than or equal to other, else false. + */ + gte(value: any, other: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.gte + */ + gte(other: any): boolean; + } + + //_.isArguments + interface LoDashStatic { + /** + * Checks if value is classified as an arguments object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. 
+ */ + isArguments(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isArguments + */ + isArguments(): boolean; + } + + //_.isArray + interface LoDashStatic { + /** + * Checks if value is classified as an Array object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + **/ + isArray(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isArray + */ + isArray(): boolean; + } + + //_.isBoolean + interface LoDashStatic { + /** + * Checks if value is classified as a boolean primitive or object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + **/ + isBoolean(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isBoolean + */ + isBoolean(): boolean; + } + + //_.isDate + interface LoDashStatic { + /** + * Checks if value is classified as a Date object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + **/ + isDate(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isDate + */ + isDate(): boolean; + } + + //_.isElement + interface LoDashStatic { + /** + * Checks if value is a DOM element. + * @param value The value to check. + * @return Returns true if value is a DOM element, else false. + */ + isElement(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isElement + */ + isElement(): boolean; + } + + //_.isEmpty + interface LoDashStatic { + /** + * Checks if value is empty. A value is considered empty unless it’s an arguments object, array, string, or + * jQuery-like collection with a length greater than 0 or an object with own enumerable properties. + * @param value The value to inspect. + * @return Returns true if value is empty, else false. + **/ + isEmpty(value?: any[]|Dictionary|string|any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isEmpty + */ + isEmpty(): boolean; + } + + //_.isError + interface LoDashStatic { + /** + * Checks if value is an Error, EvalError, RangeError, ReferenceError, SyntaxError, TypeError, or URIError + * object. + * @param value The value to check. + * @return Returns true if value is an error object, else false. + */ + isError(value: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isError + */ + isError(): boolean; + } + + //_.isFinite + interface LoDashStatic { + /** + * Checks if value is a finite primitive number. + * Note: This method is based on Number.isFinite. + * @param value The value to check. + * @return Returns true if value is a finite number, else false. + **/ + isFinite(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isFinite + */ + isFinite(): boolean; + } + + //_.isFunction + interface LoDashStatic { + /** + * Checks if value is classified as a Function object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + **/ + isFunction(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isFunction + */ + isFunction(): boolean; + } + + //_.isMatch + interface isMatchCustomizer { + (value: any, other: any, indexOrKey?: number|string): boolean; + } + + interface LoDashStatic { + /** + * Performs a deep comparison between object and source to determine if object contains equivalent property + * values. If customizer is provided it’s invoked to compare values. 
If customizer returns undefined + * comparisons are handled by the method instead. The customizer is bound to thisArg and invoked with three + * arguments: (value, other, index|key). + * @param object The object to inspect. + * @param source The object of property values to match. + * @param customizer The function to customize value comparisons. + * @param thisArg The this binding of customizer. + * @return Returns true if object is a match, else false. + */ + isMatch(object: Object, source: Object, customizer?: isMatchCustomizer, thisArg?: any): boolean; + } + + interface LoDashObjectWrapper { + /** + * @see _.isMatch + */ + isMatch(source: Object, customizer?: isMatchCustomizer, thisArg?: any): boolean; + } + + //_.isNaN + interface LoDashStatic { + /** + * Checks if value is NaN. + * Note: This method is not the same as isNaN which returns true for undefined and other non-numeric values. + * @param value The value to check. + * @return Returns true if value is NaN, else false. + */ + isNaN(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.isNaN + */ + isNaN(): boolean; + } + + //_.isNative + interface LoDashStatic { + /** + * Checks if value is a native function. + * @param value The value to check. + * @retrun Returns true if value is a native function, else false. + */ + isNative(value: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isNative + */ + isNative(): boolean; + } + + //_.isNull + interface LoDashStatic { + /** + * Checks if value is null. + * @param value The value to check. + * @return Returns true if value is null, else false. + **/ + isNull(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isNull + */ + isNull(): boolean; + } + + //_.isNumber + interface LoDashStatic { + /** + * Checks if value is classified as a Number primitive or object. + * Note: To exclude Infinity, -Infinity, and NaN, which are classified as numbers, use the _.isFinite method. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + */ + isNumber(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isNumber + */ + isNumber(): boolean; + } + + //_.isObject + interface LoDashStatic { + /** + * Checks if value is the language type of Object. (e.g. arrays, functions, objects, regexes, new Number(0), + * and new String('')) + * @param value The value to check. + * @return Returns true if value is an object, else false. + **/ + isObject(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isObject + */ + isObject(): boolean; + } + + //_.isPlainObject + interface LoDashStatic { + /** + * Checks if value is a plain object, that is, an object created by the Object constructor or one with a + * [[Prototype]] of null. + * + * Note: This method assumes objects created by the Object constructor have no inherited enumerable properties. + * + * @param value The value to check. + * @return Returns true if value is a plain object, else false. + */ + isPlainObject(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isPlainObject + */ + isPlainObject(): boolean; + } + + //_.isRegExp + interface LoDashStatic { + /** + * Checks if value is classified as a RegExp object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. 
+ */ + isRegExp(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isRegExp + */ + isRegExp(): boolean; + } + + //_.isString + interface LoDashStatic { + /** + * Checks if value is classified as a String primitive or object. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + **/ + isString(value?: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isString + */ + isString(): boolean; + } + + //_.isTypedArray + interface LoDashStatic { + /** + * Checks if value is classified as a typed array. + * @param value The value to check. + * @return Returns true if value is correctly classified, else false. + */ + isTypedArray(value: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isTypedArray + */ + isTypedArray(): boolean; + } + + //_.isUndefined + interface LoDashStatic { + /** + * Checks if value is undefined. + * @param value The value to check. + * @return Returns true if value is undefined, else false. + **/ + isUndefined(value: any): boolean; + } + + interface LoDashWrapperBase { + /** + * see _.isUndefined + */ + isUndefined(): boolean; + } + + //_.lt + interface LoDashStatic { + /** + * Checks if value is less than other. + * @param value The value to compare. + * @param other The other value to compare. + * @return Returns true if value is less than other, else false. + */ + lt(value: any, other: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.lt + */ + lt(other: any): boolean; + } + + //_.lte + interface LoDashStatic { + /** + * Checks if value is less than or equal to other. + * @param value The value to compare. + * @param other The other value to compare. + * @return Returns true if value is less than or equal to other, else false. + */ + lte(value: any, other: any): boolean; + } + + interface LoDashWrapperBase { + /** + * @see _.lte + */ + lte(other: any): boolean; + } + + //_.toPlainObject + interface LoDashStatic { + /** + * Converts value to a plain object flattening inherited enumerable properties of value to own properties + * of the plain object. + * @param value The value to convert. + * @return Returns the converted plain object. + */ + toPlainObject(value?: any): Object; + } + + /******** + * Math * + ********/ + + //_.add + interface LoDashStatic { + /** + * Adds two numbers. + * @param augend The first number to add. + * @param addend The second number to add. + * @return Returns the sum. + */ + add(augend: number, addend: number): number; + } + + interface LoDashWrapper { + /** + * @see _.add + */ + add(addend: number): number; + } + + /********** + * Number * + **********/ + + //_.inRange + interface LoDashStatic { + /** + * Checks if n is between start and up to but not including, end. If end is not specified it’s set to start + * with start then set to 0. + * @param n The number to check. + * @param start The start of the range. + * @param end The end of the range. + * @return Returns true if n is in the range, else false. + */ + inRange(n: number, start: number, end: number): boolean; + + + /** + * @see _.inRange + */ + inRange(n: number, end: number): boolean; + } + + interface LoDashWrapper { + /** + * @see _.inRange + */ + inRange(start: number, end: number): boolean; + + /** + * @see _.inRange + */ + inRange(end: number): boolean; + } + + /********** + * Object * + **********/ + + //_.assign + interface LoDashStatic { + /** + * Assigns own enumerable properties of source object(s) to the destination object. 
Subsequent + * sources will overwrite property assignments of previous sources. If a callback is provided + * it will be executed to produce the assigned values. The callback is bound to thisArg and + * invoked with two arguments; (objectValue, sourceValue). + * @param object The destination object. + * @param s1-8 The source object(s) + * @param callback The function to customize merging properties. + * @param thisArg The this binding of callback. + * @return The destination object. + **/ + assign( + object: T, + s1: S1, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + assign( + object: T, + s1: S1, + s2: S2, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + assign( + object: T, + s1: S1, + s2: S2, + s3: S3, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + assign( + object: T, + s1: S1, + s2: S2, + s3: S3, + s4: S4, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + extend( + object: T, + s1: S1, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + extend( + object: T, + s1: S1, + s2: S2, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + extend( + object: T, + s1: S1, + s2: S2, + s3: S3, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.assign + **/ + extend( + object: T, + s1: S1, + s2: S2, + s3: S3, + s4: S4, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + } + + interface LoDashObjectWrapper { + /** + * @see _.assign + **/ + assign( + s1: S1, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + + /** + * @see _.assign + **/ + assign( + s1: S1, + s2: S2, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + assign( + s1: S1, + s2: S2, + s3: S3, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + assign( + s1: S1, + s2: S2, + s3: S3, + s4: S4, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + assign( + s1: S1, + s2: S2, + s3: S3, + s4: S4, + s5: S5, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + + /** + * @see _.assign + **/ + extend( + s1: S1, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + + /** + * @see _.assign + **/ + extend( + s1: S1, + s2: S2, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + extend( + s1: S1, + s2: S2, + s3: S3, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + extend( + s1: S1, + s2: S2, + s3: S3, + s4: S4, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + /** + * @see _.assign + **/ + extend( + s1: S1, + s2: S2, + s3: S3, + s4: S4, + s5: S5, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): TResult; + + } + + //_.create + interface LoDashStatic { + /** + * Creates an object 
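// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.assign with the optional customizer described above: when the customizer
// returns a value, that value is used for the assigned property. Assumes the earlier `_` import.
_.assign({ user: 'barney' }, { age: 36 }, { user: 'fred' },
  (objValue: any, srcValue: any) => objValue === undefined ? srcValue : objValue);
// => { user: 'barney', age: 36 }  (existing values win, so 'fred' is ignored)
// --- end editorial example ---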
that inherits from the given prototype object. If a properties object is provided its own + * enumerable properties are assigned to the created object. + * @param prototype The object to inherit from. + * @param properties The properties to assign to the object. + * @return Returns the new object. + */ + create(prototype: Object, properties?: Object): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.create + */ + create(properties?: Object): LoDashObjectWrapper; + } + + //_.defaults + interface LoDashStatic { + /** + * Assigns own enumerable properties of source object(s) to the destination object for all + * destination properties that resolve to undefined. Once a property is set, additional defaults + * of the same property will be ignored. + * @param object The destination object. + * @param sources The source objects. + * @return The destination object. + **/ + defaults( + object: T, + ...sources: any[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.defaults + **/ + defaults(...sources: any[]): LoDashObjectWrapper + } + + //_.defaultsDeep + interface LoDashStatic { + /** + * This method is like _.defaults except that it recursively assigns default properties. + * @param object The destination object. + * @param sources The source objects. + * @return Returns object. + **/ + defaultsDeep( + object: T, + ...sources: any[]): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.defaultsDeep + **/ + defaultsDeep(...sources: any[]): LoDashObjectWrapper + } + + //_.findKey + interface LoDashStatic { + /** + * This method is like _.findIndex except that it returns the key of the first element that + * passes the callback check, instead of the element itself. + * @param object The object to search. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return The key of the found element, else undefined. + **/ + findKey( + object: any, + callback: (value: any) => boolean, + thisArg?: any): string; + + /** + * @see _.findKey + * @param pluckValue _.pluck style callback + **/ + findKey( + object: any, + pluckValue: string): string; + + /** + * @see _.findKey + * @param whereValue _.where style callback + **/ + findKey, T>( + object: T, + whereValue: W): string; + } + + //_.findLastKey + interface LoDashStatic { + /** + * This method is like _.findKey except that it iterates over elements of a collection in the opposite order. + * @param object The object to search. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return The key of the found element, else undefined. + **/ + findLastKey( + object: any, + callback: (value: any) => boolean, + thisArg?: any): string; + + /** + * @see _.findLastKey + * @param pluckValue _.pluck style callback + **/ + findLastKey( + object: any, + pluckValue: string): string; + + /** + * @see _.findLastKey + * @param whereValue _.where style callback + **/ + findLastKey, T>( + object: T, + whereValue: W): string; + } + + //_.forIn + interface LoDashStatic { + /** + * Iterates over own and inherited enumerable properties of an object, executing the callback for + * each property. The callback is bound to thisArg and invoked with three arguments; (value, key, + * object). Callbacks may exit iteration early by explicitly returning false. + * @param object The object to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. 
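// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.defaults and the callback / `_.where`-style forms of _.findKey declared above;
// assumes the earlier `_` import.
_.defaults({ a: 1 }, { a: 3, b: 2 });          // => { a: 1, b: 2 }

const users = {
  barney: { age: 36, active: true },
  fred:   { age: 40, active: false },
};
_.findKey(users, (u: any) => u.age < 40);      // => 'barney'
_.findKey(users, { age: 40, active: false });  // => 'fred' (where-style shorthand)
// --- end editorial example ---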
+ * @return object + **/ + forIn( + object: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.forIn + **/ + forIn( + object: T, + callback?: ObjectIterator, + thisArg?: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.forIn + **/ + forIn( + callback: ObjectIterator, + thisArg?: any): _.LoDashObjectWrapper; + } + + //_.forInRight + interface LoDashStatic { + /** + * This method is like _.forIn except that it iterates over elements of a collection in the + * opposite order. + * @param object The object to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return object + **/ + forInRight( + object: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.forInRight + **/ + forInRight( + object: T, + callback?: ObjectIterator, + thisArg?: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.forInRight + **/ + forInRight( + callback: ObjectIterator, + thisArg?: any): _.LoDashObjectWrapper; + } + + //_.forOwn + interface LoDashStatic { + /** + * Iterates over own enumerable properties of an object, executing the callback for each + * property. The callback is bound to thisArg and invoked with three arguments; (value, key, + * object). Callbacks may exit iteration early by explicitly returning false. + * @param object The object to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return object + **/ + forOwn( + object: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + + /** + * @see _.forOwn + **/ + forOwn( + object: T, + callback?: ObjectIterator, + thisArg?: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.forOwn + **/ + forOwn( + callback: ObjectIterator, + thisArg?: any): _.LoDashObjectWrapper; + } + + //_.forOwnRight + interface LoDashStatic { + /** + * This method is like _.forOwn except that it iterates over elements of a collection in the + * opposite order. + * @param object The object to iterate over. + * @param callback The function called per iteration. + * @param thisArg The this binding of callback. + * @return object + **/ + forOwnRight( + object: Dictionary, + callback?: DictionaryIterator, + thisArg?: any): Dictionary; + /** + * @see _.forOwnRight + **/ + forOwnRight( + object: T, + callback?: ObjectIterator, + thisArg?: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.forOwnRight + **/ + forOwnRight( + callback: ObjectIterator, + thisArg?: any): _.LoDashObjectWrapper; + } + + //_.functions + interface LoDashStatic { + /** + * Creates a sorted array of property names of all enumerable properties, own and inherited, of + * object that have function values. + * @param object The object to inspect. + * @return An array of property names that have function values. + **/ + functions(object: any): string[]; + + /** + * @see _functions + **/ + methods(object: any): string[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.functions + **/ + functions(): _.LoDashArrayWrapper; + + /** + * @see _.functions + **/ + methods(): _.LoDashArrayWrapper; + } + + //_.get + interface LoDashStatic { + /** + * Gets the property value at path of object. If the resolved + * value is undefined the defaultValue is used in its place. + * @param object The object to query. + * @param path The path of the property to get. 
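// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch contrasting _.forOwn (own properties only) with _.forIn (own and inherited),
// as documented above; assumes the earlier `_` import.
const base = { inherited: true };
const child = Object.create(base);
child.own = 1;
_.forOwn(child, (value, key) => console.log(key)); // logs 'own'
_.forIn(child, (value, key) => console.log(key));  // logs 'own' and 'inherited'
// --- end editorial example ---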
+ * @param defaultValue The value returned if the resolved value is undefined. + * @return Returns the resolved value. + **/ + get(object: Object, + path: string|number|boolean|Array, + defaultValue?:TResult + ): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.get + **/ + get(path: string|number|boolean|Array, + defaultValue?: TResult + ): TResult; + } + + //_.has + interface LoDashStatic { + /** + * Checks if path is a direct property. + * + * @param object The object to query. + * @param path The path to check. + * @return Returns true if path is a direct property, else false. + */ + has(object: any, path: string|number|boolean|Array): boolean; + } + + interface LoDashObjectWrapper { + /** + * @see _.has + */ + has(path: string|number|boolean|Array): boolean; + } + + //_.invert + interface LoDashStatic { + /** + * Creates an object composed of the inverted keys and values of the given object. + * @param object The object to invert. + * @return The created inverted object. + **/ + invert(object: any): any; + } + + //_.isEqual + interface EqCustomizer { + (value: any, other: any, indexOrKey?: number|string): boolean; + } + + interface LoDashStatic { + /** + * Performs a deep comparison between two values to determine if they are equivalent. If customizer is + * provided it is invoked to compare values. If customizer returns undefined comparisons are handled + * by the method instead. The customizer is bound to thisArg and invoked with three + * arguments: (value, other [, index|key]). + * @param value The value to compare. + * @param other The other value to compare. + * @param callback The function to customize value comparisons. + * @param thisArg The this binding of customizer. + * @return True if the values are equivalent, else false. + */ + isEqual(value?: any, + other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + + /** + * @see _.isEqual + */ + eq(value?: any, + other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + } + + interface LoDashWrapper { + /** + * @see _.isEqual + */ + isEqual(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + + /** + * @see _.isEqual + */ + eq(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + + } + + interface LoDashArrayWrapper { + /** + * @see _.isEqual + */ + isEqual(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + + /** + * @see _.isEqual + */ + eq(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + } + + interface LoDashObjectWrapper { + /** + * @see _.isEqual + */ + isEqual(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + + /** + * @see _.isEqual + */ + eq(other?: any, + callback?: EqCustomizer, + thisArg?: any): boolean; + } + + //_.keys + interface LoDashStatic { + /** + * Creates an array composed of the own enumerable property names of an object. + * @param object The object to inspect. + * @return An array of property names. + **/ + keys(object?: any): string[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.keys + **/ + keys(): LoDashArrayWrapper + } + + //_.keysIn + interface LoDashStatic { + /** + * Creates an array of the own and inherited enumerable property names of object. + * @param object The object to query. + * @return An array of property names. 
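// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of path-based access with _.get/_.has and a deep comparison with _.isEqual,
// as declared above; assumes the earlier `_` import.
const object = { a: { b: { c: 3 } } };
_.get(object, 'a.b.c');            // => 3
_.get(object, 'a.b.d', 'default'); // => 'default'
_.has(object, 'a.b.c');            // => true
_.isEqual({ a: 1 }, { a: 1 });     // => true
// --- end editorial example ---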
+ **/ + keysIn(object?: any): string[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.keysIn + **/ + keysIn(): LoDashArrayWrapper + } + + //_.mapValues + interface LoDashStatic { + /** + * Creates an object with the same keys as object and values generated by running each own + * enumerable property of object through iteratee. The iteratee function is bound to thisArg + * and invoked with three arguments: (value, key, object). + * + * If a property name is provided iteratee the created "_.property" style callback returns + * the property value of the given element. + * + * If a value is also provided for thisArg the creted "_.matchesProperty" style callback returns + * true for elements that have a matching property value, else false;. + * + * If an object is provided for iteratee the created "_.matches" style callback returns true + * for elements that have the properties of the given object, else false. + * + * @param {Object} object The object to iterate over. + * @param {Function|Object|string} [iteratee=_.identity] The function invoked per iteration. + * @param {Object} [thisArg] The `this` binding of `iteratee`. + * @return {Object} Returns the new mapped object. + */ + mapValues(obj: Dictionary, callback: ObjectIterator, thisArg?: any): Dictionary; + mapValues(obj: Dictionary, where: Dictionary): Dictionary; + mapValues(obj: T, pluck: string): TMapped; + mapValues(obj: T, callback: ObjectIterator, thisArg?: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.mapValues + * TValue is the type of the property values of T. + * TResult is the type output by the ObjectIterator function + */ + mapValues(callback: ObjectIterator, thisArg?: any): LoDashObjectWrapper>; + + /** + * @see _.mapValues + * TResult is the type of the property specified by pluck. + * T should be a Dictionary> + */ + mapValues(pluck: string): LoDashObjectWrapper>; + + /** + * @see _.mapValues + * TResult is the type of the properties on the object specified by pluck. + * T should be a Dictionary>> + */ + mapValues(pluck: string, where: Dictionary): LoDashArrayWrapper>; + + /** + * @see _.mapValues + * TResult is the type of the properties of each object in the values of T + * T should be a Dictionary> + */ + mapValues(where: Dictionary): LoDashArrayWrapper; + } + + //_.merge + interface LoDashStatic { + /** + * Recursively merges own enumerable properties of the source object(s), that don't resolve + * to undefined into the destination object. Subsequent sources will overwrite property + * assignments of previous sources. If a callback is provided it will be executed to produce + * the merged values of the destination and source properties. If the callback returns undefined + * merging will be handled by the method instead. The callback is bound to thisArg and invoked + * with two arguments; (objectValue, sourceValue). + * @param object The destination object. + * @param s1-8 The source object(s) + * @param callback The function to customize merging properties. + * @param thisArg The this binding of callback. + * @return The destination object. 
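// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.mapValues with both an iteratee function and the "_.property"-style string
// shorthand described above; assumes the earlier `_` import.
const people = {
  fred:    { user: 'fred',    age: 40 },
  pebbles: { user: 'pebbles', age: 1 },
};
_.mapValues(people, (p: any) => p.age);  // => { fred: 40, pebbles: 1 }
_.mapValues(people, 'age');              // => { fred: 40, pebbles: 1 }
// --- end editorial example ---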
+ **/ + merge( + object: T, + s1: S1, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.merge + **/ + merge( + object: T, + s1: S1, + s2: S2, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.merge + **/ + merge( + object: T, + s1: S1, + s2: S2, + s3: S3, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + + /** + * @see _.merge + **/ + merge( + object: T, + s1: S1, + s2: S2, + s3: S3, + s4: S4, + callback?: (objectValue: Value, sourceValue: Value) => Value, + thisArg?: any): Result; + } + + //_.omit + interface LoDashStatic { + /** + * Creates a shallow clone of object excluding the specified properties. Property names may be + * specified as individual arguments or as arrays of property names. If a callback is provided + * it will be executed for each property of object omitting the properties the callback returns + * truey for. The callback is bound to thisArg and invoked with three arguments; (value, key, + * object). + * @param object The source object. + * @param keys The properties to omit. + * @return An object without the omitted properties. + **/ + omit( + object: T, + ...keys: string[]): Omitted; + + /** + * @see _.omit + **/ + omit( + object: T, + keys: string[]): Omitted; + + /** + * @see _.omit + **/ + omit( + object: T, + callback: ObjectIterator, + thisArg?: any): Omitted; + } + + interface LoDashObjectWrapper { + /** + * @see _.omit + **/ + omit( + ...keys: string[]): LoDashObjectWrapper; + + /** + * @see _.omit + **/ + omit( + keys: string[]): LoDashObjectWrapper; + + /** + * @see _.omit + **/ + omit( + callback: ObjectIterator, + thisArg?: any): LoDashObjectWrapper; + } + + //_.pairs + interface LoDashStatic { + /** + * Creates a two dimensional array of an object's key-value pairs, + * i.e. [[key1, value1], [key2, value2]]. + * @param object The object to inspect. + * @return Aew array of key-value pairs. + **/ + pairs(object?: any): any[][]; + } + + interface LoDashObjectWrapper { + /** + * @see _.pairs + **/ + pairs(): LoDashArrayWrapper; + } + + //_.pick + interface LoDashStatic { + /** + * Creates an object composed of the picked object properties. Property names may be specified as individual + * arguments or as arrays of property names. If predicate is provided it’s invoked for each property of object + * picking the properties predicate returns truthy for. The predicate is bound to thisArg and invoked with + * three arguments: (value, key, object). + * + * @param object The source object. + * @param predicate The function invoked per iteration or property names to pick, specified as individual + * property names or arrays of property names. + * @param thisArg The this binding of predicate. + * @return An object composed of the picked properties. + */ + pick( + object: T, + predicate: ObjectIterator, + thisArg?: any + ): TResult; + + /** + * @see _.pick + */ + pick( + object: T, + ...predicate: Array> + ): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.pick + */ + pick( + predicate: ObjectIterator, + thisArg?: any + ): LoDashObjectWrapper; + + /** + * @see _.pick + */ + pick( + ...predicate: Array> + ): LoDashObjectWrapper; + } + + //_.set + interface LoDashStatic { + /** + * Sets the property value of path on object. If a portion of path does not exist it is created. + * @param object The object to augment. + * @param path The path of the property to set. + * @param value The value to set. 
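// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of a recursive _.merge plus _.pick/_.omit, as documented above;
// assumes the earlier `_` import.
_.merge({ data: [{ user: 'barney' }] }, { data: [{ age: 36 }] });
// => { data: [{ user: 'barney', age: 36 }] }

const account = { user: 'fred', age: 40, password: 'secret' };
_.pick(account, 'user', 'age');  // => { user: 'fred', age: 40 }
_.omit(account, 'password');     // => { user: 'fred', age: 40 }
// --- end editorial example ---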
+ * @return Returns object. + **/ + set(object: T, + path: string|string[], + value: any): T; + } + + interface LoDashObjectWrapper { + /** + * @see _.set + **/ + set(path: string|string[], + value: any): LoDashObjectWrapper; + } + + //_.transform + interface LoDashStatic { + /** + * An alternative to _.reduce this method transforms object to a new accumulator object which is + * the result of running each of its elements through a callback, with each callback execution + * potentially mutating the accumulator object. The callback is bound to thisArg and invoked with + * four arguments; (accumulator, value, key, object). Callbacks may exit iteration early by + * explicitly returning false. + * @param collection The collection to iterate over. + * @param callback The function called per iteration. + * @param accumulator The custom accumulator value. + * @param thisArg The this binding of callback. + * @return The accumulated value. + **/ + transform( + collection: Array, + callback: MemoVoidIterator, + accumulator: Acc, + thisArg?: any): Acc; + + /** + * @see _.transform + **/ + transform( + collection: List, + callback: MemoVoidIterator, + accumulator: Acc, + thisArg?: any): Acc; + + /** + * @see _.transform + **/ + transform( + collection: Dictionary, + callback: MemoVoidIterator, + accumulator: Acc, + thisArg?: any): Acc; + + /** + * @see _.transform + **/ + transform( + collection: Array, + callback?: MemoVoidIterator, + thisArg?: any): Acc; + + /** + * @see _.transform + **/ + transform( + collection: List, + callback?: MemoVoidIterator, + thisArg?: any): Acc; + + /** + * @see _.transform + **/ + transform( + collection: Dictionary, + callback?: MemoVoidIterator, + thisArg?: any): Acc; + } + + //_.values + interface LoDashStatic { + /** + * Creates an array of the own enumerable property values of object. + * @param object The object to query. + * @return Returns an array of property values. + **/ + values(object?: any): T[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.values + **/ + values(): LoDashObjectWrapper; + } + + //_.valuesIn + interface LoDashStatic { + /** + * Creates an array of the own and inherited enumerable property values of object. + * @param object The object to query. + * @return Returns the array of property values. + **/ + valuesIn(object?: any): T[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.valuesIn + **/ + valuesIn(): LoDashObjectWrapper; + } + + /********** + * String * + **********/ + + //_.camelCase + interface LoDashStatic { + /** + * Converts string to camel case. + * @param string The string to convert. + * @return Returns the camel cased string. + */ + camelCase(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.camelCase + */ + camelCase(): string; + } + + //_.capitalize + interface LoDashStatic { + capitalize(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.capitalize + */ + capitalize(): string; + } + + //_.deburr + interface LoDashStatic { + /** + * Deburrs string by converting latin-1 supplementary letters to basic latin letters and removing combining + * diacritical marks. + * @param string The string to deburr. + * @return Returns the deburred string. + */ + deburr(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.deburr + */ + deburr(): string; + } + + //_.endsWith + interface LoDashStatic { + /** + * Checks if string ends with the given target string. + * @param string The string to search. + * @param target The string to search for. 
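// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.set creating intermediate objects along a path, and of _.transform building a
// new accumulator with early exit, as described above; assumes the earlier `_` import.
const settings: any = {};
_.set(settings, 'a.b.c', 4);     // settings is now { a: { b: { c: 4 } } }

_.transform([2, 3, 4], (acc: number[], n: number) => {
  acc.push(n * n);
  return n % 2 === 0;            // returning false exits iteration early
}, []);
// => [4, 9]
// --- end editorial example ---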
+ * @param position The position to search from. + * @return Returns true if string ends with target, else false. + */ + endsWith(string?: string, target?: string, position?: number): boolean; + } + + interface LoDashWrapper { + /** + * @see _.endsWith + */ + endsWith(target?: string, position?: number): boolean; + } + + // _.escape + interface LoDashStatic { + /** + * Converts the characters "&", "<", ">", '"', "'", and "`", in string to their corresponding HTML entities. + * @param string The string to escape. + * @return Returns the escaped string. + */ + escape(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.escape + */ + escape(): string; + } + + // _.escapeRegExp + interface LoDashStatic { + /** + * Escapes the RegExp special characters "\", "/", "^", "$", ".", "|", "?", "*", "+", "(", ")", "[", "]", + * "{" and "}" in string. + * @param string The string to escape. + * @return Returns the escaped string. + */ + escapeRegExp(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.escapeRegExp + */ + escapeRegExp(): string; + } + + //_.kebabCase + interface LoDashStatic { + /** + * Converts string to kebab case. + * @param string The string to convert. + * @return Returns the kebab cased string. + */ + kebabCase(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.kebabCase + */ + kebabCase(): string; + } + + interface LoDashStatic { + /** + * + * @param string The string to pad. + * @param length The padding length. + * @param chars The string used as padding. + * @return Returns the padded string. + */ + pad(string?: string, length?: number, chars?: string): string; + } + + //_.pad + interface LoDashWrapper { + /** + * @see _.pad + */ + pad(length?: number, chars?: string): string; + } + + //_.padLeft + interface LoDashStatic { + /** + * Pads string on the left side if it’s shorter than length. Padding characters are truncated if they exceed + * length. + * @param string The string to pad. + * @param length The padding length. + * @param chars The string used as padding. + * @return Returns the padded string. + */ + padLeft(string?: string, length?: number, chars?: string): string; + } + + //_.padLeft + interface LoDashWrapper { + /** + * @see _.padLeft + */ + padLeft(length?: number, chars?: string): string; + } + + //_.padRight + interface LoDashStatic { + /** + * Pads string on the right side if it’s shorter than length. Padding characters are truncated if they exceed + * length. + * @param string The string to pad. + * @param length The padding length. + * @param chars The string used as padding. + * @return Returns the padded string. + */ + padRight(string?: string, length?: number, chars?: string): string; + } + + //_.padRight + interface LoDashWrapper { + /** + * @see _.padRight + */ + padRight(length?: number, chars?: string): string; + } + + //_.parseInt + interface LoDashStatic { + /** + * Converts string to an integer of the specified radix. If radix is undefined or 0, a radix of 10 is used + * unless value is a hexadecimal, in which case a radix of 16 is used. + * Note: This method aligns with the ES5 implementation of parseInt. + * @param string The string to convert. + * @param radix The radix to interpret value by. + * @return Returns the converted integer. 
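// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of a few of the string helpers above (lodash 3.x names; padLeft/padRight were later
// renamed in the library); assumes the earlier `_` import.
_.endsWith('abc', 'c');               // => true
_.escape('fred, barney & pebbles');   // => 'fred, barney &amp; pebbles'
_.kebabCase('Foo Bar');               // => 'foo-bar'
_.pad('abc', 8);                      // => '  abc   '
_.padLeft('abc', 6, '_-');            // => '_-_abc'
// --- end editorial example ---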
+ */ + parseInt(string: string, radix?: number): number; + } + + interface LoDashWrapper { + /** + * @see _.parseInt + */ + parseInt(radix?: number): number; + } + + //_.repeat + interface LoDashStatic { + /** + * Repeats the given string n times. + * @param string The string to repeat. + * @param n The number of times to repeat the string. + * @return Returns the repeated string. + */ + repeat(string?: string, n?: number): string; + } + + interface LoDashWrapper { + /** + * @see _.repeat + */ + repeat(n?: number): string; + } + + //_.snakeCase + interface LoDashStatic { + /** + * Converts string to snake case. + * @param string The string to convert. + * @return Returns the snake cased string. + */ + snakeCase(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.snakeCase + */ + snakeCase(): string; + } + + //_.startCase + interface LoDashStatic { + /** + * Converts string to start case. + * @param string The string to convert. + * @return Returns the start cased string. + */ + startCase(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.startCase + */ + startCase(): string; + } + + //_.startsWith + interface LoDashStatic { + /** + * Checks if string starts with the given target string. + * @param string The string to search. + * @param target The string to search for. + * @param position The position to search from. + * @return Returns true if string starts with target, else false. + */ + startsWith(string?: string, target?: string, position?: number): boolean; + } + + interface LoDashWrapper { + /** + * @see _.startsWith + */ + startsWith(target?: string, position?: number): boolean; + } + + //_.template + interface TemplateExecutor { + (data?: Object): string; + source: string; + } + + interface LoDashStatic { + /** + * Creates a compiled template function that can interpolate data properties in "interpolate" delimiters, + * HTML-escape interpolated data properties in "escape" delimiters, and execute JavaScript in "evaluate" + * delimiters. Data properties may be accessed as free variables in the template. If a setting object is + * provided it takes precedence over _.templateSettings values. + * + * Note: In the development build _.template utilizes + * [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl) for easier + * debugging. + * + * For more information on precompiling templates see + * [lodash's custom builds documentation](https://lodash.com/custom-builds). + * + * For more information on Chrome extension sandboxes see + * [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval). + * + * @param string The template string. + * @param options The options object. + * @param options.escape The HTML "escape" delimiter. + * @param options.evaluate The "evaluate" delimiter. + * @param options.imports An object to import into the template as free variables. + * @param options.interpolate The "interpolate" delimiter. + * @param options.variable The data object variable name. + * @return Returns the compiled template function. + */ + template( + string: string, + options?: TemplateSettings): TemplateExecutor; + } + + interface LoDashWrapper { + /** + * @see _.template + */ + template(options?: TemplateSettings): TemplateExecutor; + } + + //_.trim + interface LoDashStatic { + /** + * Removes leading and trailing whitespace or specified characters from string. + * @param string The string to trim. + * @param chars The characters to trim. 
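// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of compiling and executing a template with _.template, plus a couple more string
// helpers declared above; assumes the earlier `_` import.
const compiled = _.template('hello <%= user %>!');
compiled({ user: 'fred' });   // => 'hello fred!'

_.startCase('--foo-bar');     // => 'Foo Bar'
_.repeat('ab', 3);            // => 'ababab'
// --- end editorial example ---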
+ * @return Returns the trimmed string. + */ + trim(string?: string, chars?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.trim + */ + trim(chars?: string): string; + } + + //_.trimLeft + interface LoDashStatic { + /** + * Removes leading whitespace or specified characters from string. + * @param string The string to trim. + * @param chars The characters to trim. + * @return Returns the trimmed string. + */ + trimLeft(string?: string, chars?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.trimLeft + */ + trimLeft(chars?: string): string; + } + + //_.trimRight + interface LoDashStatic { + /** + * Removes trailing whitespace or specified characters from string. + * @param string The string to trim. + * @param chars The characters to trim. + * @return Returns the trimmed string. + */ + trimRight(string?: string, chars?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.trimRight + */ + trimRight(chars?: string): string; + } + + //_.trunc + interface TruncOptions { + /** The maximum string length. */ + length?: number; + /** The string to indicate text is omitted. */ + omission?: string; + /** The separator pattern to truncate to. */ + separator?: string|RegExp; + } + + interface LoDashStatic { + /** + * Truncates string if it’s longer than the given maximum string length. The last characters of the truncated + * string are replaced with the omission string which defaults to "…". + * @param string The string to truncate. + * @param options The options object or maximum string length. + * @return Returns the truncated string. + */ + trunc(string?: string, options?: TruncOptions|number): string; + } + + interface LoDashWrapper { + /** + * @see _.trunc + */ + trunc(options?: TruncOptions|number): string; + } + + //_.unescape + interface LoDashStatic { + /** + * The inverse of _.escape; this method converts the HTML entities &, <, >, ", ', and ` + * in string to their corresponding characters. + * @param string The string to unescape. + * @return Returns the unescaped string. + */ + unescape(string?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.unescape + */ + unescape(): string; + } + + //_.words + interface LoDashStatic { + /** + * Splits string into an array of its words. + * @param string The string to inspect. + * @param pattern The pattern to match words. + * @return Returns the words of string. + */ + words(string?: string, pattern?: string|RegExp): string[]; + } + + interface LoDashWrapper { + /** + * @see _.words + */ + words(pattern?: string|RegExp): string[]; + } + + /*********** + * Utility * + ***********/ + + //_.attempt + interface LoDashStatic { + /** + * Attempts to invoke func, returning either the result or the caught error object. Any additional arguments + * are provided to func when it’s invoked. + * @param func The function to attempt. + * @return Returns the func result or error object. + */ + attempt(func: (...args: any[]) => TResult): TResult|Error; + } + + interface LoDashObjectWrapper { + /** + * @see _.attempt + */ + attempt(): TResult|Error; + } + + //_.identity + interface LoDashStatic { + /** + * This method returns the first argument provided to it. + * @param value Any value. + * @return Returns value. 
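// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.trunc with an options object, _.words, and _.attempt wrapping a call that can
// throw, as documented above; assumes the earlier `_` import.
_.trunc('hi-diddly-ho there, neighborino', { length: 24, separator: ' ' });
// => 'hi-diddly-ho there,...'
_.words('fred, barney, & pebbles');    // => ['fred', 'barney', 'pebbles']

const parsed = _.attempt(() => JSON.parse('not valid json'));
if (parsed instanceof Error) {
  console.log('parse failed:', parsed.message);
}
// --- end editorial example ---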
+ */ + identity(value?: T): T; + } + + interface LoDashWrapper { + /** + * @see _.identity + */ + identity(): T; + } + + interface LoDashArrayWrapper { + /** + * @see _.identity + */ + identity(): T[]; + } + + interface LoDashObjectWrapper { + /** + * @see _.identity + */ + identity(): T; + } + + //_.method + interface LoDashStatic { + /** + * Creates a function that invokes the method at path on a given object. Any additional arguments are provided + * to the invoked method. + * @param path The path of the method to invoke. + * @param args The arguments to invoke the method with. + * @return Returns the new function. + */ + method(path: string, ...args: any[]): (object: any) => TResult; + + /** + * @see _.method + */ + method(path: any[], ...args: any[]): (object: any) => TResult; + } + + interface LoDashWrapper { + /** + * @see _.method + */ + method(...args: any[]): LoDashWrapper<(object: any) => TResult>; + + /** + * @see _.method + */ + method(...args: any[]): LoDashWrapper<(object: any) => TResult>; + } + + interface LoDashArrayWrapper { + /** + * @see _.method + */ + method(...args: any[]): LoDashWrapper<(object: any) => TResult>; + + /** + * @see _.method + */ + method(...args: any[]): LoDashWrapper<(object: any) => TResult>; + } + + //_.methodOf + interface LoDashStatic { + /** + * The opposite of _.method; this method creates a function that invokes the method at a given path on object. + * Any additional arguments are provided to the invoked method. + * @param object The object to query. + * @param args The arguments to invoke the method with. + * @return Returns the new function. + */ + methodOf(object: Object, ...args: any[]): (path: string | any[]) => TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.methodOf + */ + methodOf(...args: any[]): LoDashObjectWrapper<(path: string | any[]) => TResult>; + } + + //_.mixin + interface MixinOptions { + chain?: boolean; + } + + interface LoDashStatic { + /** + * Adds all own enumerable function properties of a source object to the destination object. If object is a + * function then methods are added to its prototype as well. + * + * Note: Use _.runInContext to create a pristine lodash function to avoid conflicts caused by modifying + * the original. + * + * @param object The destination object. + * @param source The object of functions to add. + * @param options The options object. + * @param options.chain Specify whether the functions added are chainable. + * @return Returns object. + */ + mixin( + object: TObject, + source: Dictionary, + options?: MixinOptions + ): TResult; + + /** + * @see _.mixin + */ + mixin( + source: Dictionary, + options?: MixinOptions + ): TResult; + } + + interface LoDashObjectWrapper { + /** + * @see _.mixin + */ + mixin( + source: Dictionary, + options?: MixinOptions + ): LoDashObjectWrapper; + + /** + * @see _.mixin + */ + mixin( + options?: MixinOptions + ): LoDashObjectWrapper; + } + + //_.noConflict + interface LoDashStatic { + /** + * Reverts the '_' variable to its previous value and returns a reference to the lodash function. + * @return The lodash function. + **/ + noConflict(): typeof _; + } + + //_.noop + interface LoDashStatic { + /** + * A no-operation function that returns undefined regardless of the arguments it receives. 
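// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.method building an invoker function and _.mixin adding a custom helper onto
// `_` itself, as described above. The `vowels` helper is illustrative, not part of lodash.
// Assumes the earlier `_` import.
const objects = [{ a: { b: () => 2 } }, { a: { b: () => 1 } }];
_.map(objects, _.method('a.b'));   // => [2, 1]

_.mixin({ vowels: (s: string) => s.replace(/[^aeiou]/g, '') });
(_ as any).vowels('fred');         // => 'e'
// --- end editorial example ---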
+ * @return undefined + */ + noop(...args: any[]): void; + } + + interface LoDashWrapperBase { + /** + * @see _.noop + */ + noop(...args: any[]): void; + } + + //_.property + interface LoDashStatic { + /** + * Creates a function that returns the property value at path on a given object. + * @param path The path of the property to get. + * @return Returns the new function. + */ + property(path: string|string[]): (obj: TObj) => TResult; + } + + interface LoDashStringWrapper { + /** + * @see _.property + */ + property(): LoDashObjectWrapper<(obj: TObj) => TResult>; + } + + interface LoDashArrayWrapper { + /** + * @see _.property + */ + property(): LoDashObjectWrapper<(obj: TObj) => TResult>; + } + + //_.propertyOf + interface LoDashStatic { + /** + * The opposite of _.property; this method creates a function that returns the property value at a given path + * on object. + * @param object The object to query. + * @return Returns the new function. + */ + propertyOf(object: T): (path: string|string[]) => any; + } + + interface LoDashObjectWrapper { + /** + * @see _.propertyOf + */ + propertyOf(): LoDashObjectWrapper<(path: string|string[]) => any>; + } + + //_.range + interface LoDashStatic { + /** + * Creates an array of numbers (positive and/or negative) progressing from start up to, but not including, end. + * If end is not specified it’s set to start with start then set to 0. If end is less than start a zero-length + * range is created unless a negative step is specified. + * @param start The start of the range. + * @param end The end of the range. + * @param step The value to increment or decrement by. + * @return Returns a new range array. + */ + range( + start: number, + end: number, + step?: number): number[]; + + /** + * @see _.range + */ + range( + end: number, + step?: number): number[]; + } + + interface LoDashWrapper { + /** + * @see _.range + */ + range( + end?: number, + step?: number): LoDashArrayWrapper; + } + + //_.random + interface LoDashStatic { + /** + * Produces a random number between min and max (inclusive). If only one argument is provided a + * number between 0 and the given number will be returned. If floating is truey or either min or + * max are floats a floating-point number will be returned instead of an integer. + * @param max The maximum possible value. + * @param floating Specify returning a floating-point number. + * @return A random number. + **/ + random(max: number, floating?: boolean): number; + + /** + * @see _.random + * @param min The minimum possible value. + * @return A random number between `min` and `max`. + **/ + random(min: number, max: number, floating?: boolean): number; + } + + //_.result + interface LoDashStatic { + /** + * Resolves the value of property on object. If property is a function it will be invoked with + * the this binding of object and its result returned, else the property value is returned. If + * object is false then undefined is returned. + * @param object The object to query. + * @param path The path of the property to resolve. + * @param defaultValue The value returned if the resolved value is undefined. + * @return The resolved value. + **/ + + result(object: any, path: string|string[], defaultValue?: T): T; + } + + //_.runInContext + interface LoDashStatic { + /** + * Create a new lodash function using the given context object. + * @param context The context object + * @returns The lodash function. 
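// --- Editorial example, not part of the upstream lodash.d.ts diff ---
// A sketch of _.range, _.random and _.result as declared above; assumes the earlier `_` import.
_.range(4);          // => [0, 1, 2, 3]
_.range(1, 5);       // => [1, 2, 3, 4]
_.range(0, 20, 5);   // => [0, 5, 10, 15]
_.random(0, 5);      // => an integer between 0 and 5 (inclusive)

const greeter = { name: 'fred', greet: () => 'hi' };
_.result(greeter, 'greet');  // => 'hi' (function values are invoked)
_.result(greeter, 'name');   // => 'fred'
// --- end editorial example ---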
+ **/ + runInContext(context: any): typeof _; + } + + //_.times + interface LoDashStatic { + /** + * Invokes the iteratee function n times, returning an array of the results of each invocation. The iteratee is + * bound to thisArg and invoked with one argument; (index). + * + * @param n The number of times to invoke iteratee. + * @param iteratee The function invoked per iteration. + * @param thisArg The this binding of iteratee. + * @return Returns the array of results. + */ + times( + n: number, + iteratee: (num: number) => TResult, + thisArg?: any + ): TResult[]; + + /** + * @see _.times + */ + times(n: number): number[]; + } + + interface LoDashWrapper { + /** + * @see _.times + */ + times( + iteratee: (num: number) => TResult, + thisArgs?: any + ): LoDashArrayWrapper; + + /** + * @see _.times + */ + times(): LoDashArrayWrapper; + } + + //_.uniqueId + interface LoDashStatic { + /** + * Generates a unique ID. If prefix is provided the ID is appended to it. + * @param prefix The value to prefix the ID with. + * @return Returns the unique ID. + */ + uniqueId(prefix?: string): string; + } + + interface LoDashWrapper { + /** + * @see _.uniqueId + */ + uniqueId(): string; + } + + //_.constant + interface LoDashStatic { + /** + * Creates a function that returns value. + * @param value The value to return from the new function. + * @return Returns the new function. + */ + constant(value: T): () => T; + } + + interface LoDashWrapperBase { + /** + * @see _.constant + */ + constant(): () => TResult; + } + + interface ListIterator { + (value: T, index: number, collection: T[]): TResult; + } + + interface DictionaryIterator { + (value: T, key: string, collection: Dictionary): TResult; + } + + interface ObjectIterator { + (element: T, key: string, collection: any): TResult; + } + + interface MemoVoidIterator { + (prev: TResult, curr: T, indexOrKey: any, list?: T[]): void; + } + interface MemoIterator { + (prev: TResult, curr: T, indexOrKey: any, list?: T[]): TResult; + } + /* + interface MemoListIterator { + (prev: TResult, curr: T, index: number, list?: T[]): TResult; + } + interface MemoObjectIterator { + (prev: TResult, curr: T, index: string, object?: Dictionary): TResult; + } + */ + + //interface Collection {} + + // Common interface between Arrays and jQuery objects + interface List { + [index: number]: T; + length: number; + } + + interface Dictionary { + [index: string]: T; + } +} + +declare module "lodash" { + export = _; +} diff --git a/public/app/headers/moment/moment-node.d.ts b/public/app/headers/moment/moment-node.d.ts new file mode 100644 index 0000000000000..b109893a3cf74 --- /dev/null +++ b/public/app/headers/moment/moment-node.d.ts @@ -0,0 +1,479 @@ +// Type definitions for Moment.js 2.8.0 +// Project: https://github.com/timrwood/moment +// Definitions by: Michael Lakerveld , Aaron King , Hiroki Horiuchi , Dick van den Brink , Adi Dahiya , Matt Brooks +// Definitions: https://github.com/borisyankov/DefinitelyTyped + +declare module moment { + + interface MomentInput { + + /** Year */ + years?: number; + /** Year */ + year?: number; + /** Year */ + y?: number; + + /** Month */ + months?: number; + /** Month */ + month?: number; + /** Month */ + M?: number; + + /** Day/Date */ + days?: number; + /** Day/Date */ + day?: number; + /** Day/Date */ + date?: number; + /** Day/Date */ + d?: number; + + /** Hour */ + hours?: number; + /** Hour */ + hour?: number; + /** Hour */ + h?: number; + + /** Minute */ + minutes?: number; + /** Minute */ + minute?: number; + /** Minute */ 
+ m?: number; + + /** Second */ + seconds?: number; + /** Second */ + second?: number; + /** Second */ + s?: number; + + /** Millisecond */ + milliseconds?: number; + /** Millisecond */ + millisecond?: number; + /** Millisecond */ + ms?: number; + + } + + interface Duration { + + humanize(withSuffix?: boolean): string; + + as(units: string): number; + + milliseconds(): number; + asMilliseconds(): number; + + seconds(): number; + asSeconds(): number; + + minutes(): number; + asMinutes(): number; + + hours(): number; + asHours(): number; + + days(): number; + asDays(): number; + + months(): number; + asMonths(): number; + + years(): number; + asYears(): number; + + add(n: number, p: string): Duration; + add(n: number): Duration; + add(d: Duration): Duration; + + subtract(n: number, p: string): Duration; + subtract(n: number): Duration; + subtract(d: Duration): Duration; + + toISOString(): string; + toJSON(): string; + + } + + interface Moment { + + format(format: string): string; + format(): string; + + fromNow(withoutSuffix?: boolean): string; + + startOf(unitOfTime: string): Moment; + endOf(unitOfTime: string): Moment; + + /** + * Mutates the original moment by adding time. (deprecated in 2.8.0) + * + * @param unitOfTime the unit of time you want to add (eg "years" / "hours" etc) + * @param amount the amount you want to add + */ + add(unitOfTime: string, amount: number): Moment; + /** + * Mutates the original moment by adding time. + * + * @param amount the amount you want to add + * @param unitOfTime the unit of time you want to add (eg "years" / "hours" etc) + */ + add(amount: number, unitOfTime: string): Moment; + /** + * Mutates the original moment by adding time. Note that the order of arguments can be flipped. + * + * @param amount the amount you want to add + * @param unitOfTime the unit of time you want to add (eg "years" / "hours" etc) + */ + add(amount: string, unitOfTime: string): Moment; + /** + * Mutates the original moment by adding time. + * + * @param objectLiteral an object literal that describes multiple time units {days:7,months:1} + */ + add(objectLiteral: MomentInput): Moment; + /** + * Mutates the original moment by adding time. + * + * @param duration a length of time + */ + add(duration: Duration): Moment; + + /** + * Mutates the original moment by subtracting time. (deprecated in 2.8.0) + * + * @param unitOfTime the unit of time you want to subtract (eg "years" / "hours" etc) + * @param amount the amount you want to subtract + */ + subtract(unitOfTime: string, amount: number): Moment; + /** + * Mutates the original moment by subtracting time. + * + * @param unitOfTime the unit of time you want to subtract (eg "years" / "hours" etc) + * @param amount the amount you want to subtract + */ + subtract(amount: number, unitOfTime: string): Moment; + /** + * Mutates the original moment by subtracting time. Note that the order of arguments can be flipped. + * + * @param amount the amount you want to add + * @param unitOfTime the unit of time you want to subtract (eg "years" / "hours" etc) + */ + subtract(amount: string, unitOfTime: string): Moment; + /** + * Mutates the original moment by subtracting time. + * + * @param objectLiteral an object literal that describes multiple time units {days:7,months:1} + */ + subtract(objectLiteral: MomentInput): Moment; + /** + * Mutates the original moment by subtracting time. 
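// --- Editorial example, not part of the upstream moment-node.d.ts diff ---
// A sketch of the add/subtract overloads declared above, which accept a number plus unit,
// an object literal of units, or a Duration (moment 2.8-era API).
import moment = require('moment');

const m = moment('2015-10-28');
m.add(7, 'days');                     // mutates m, now 2015-11-04
m.subtract({ months: 1, days: 2 });   // object-literal form
m.add(moment.duration(2, 'hours'));   // Duration form
// --- end editorial example ---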
+ * + * @param duration a length of time + */ + subtract(duration: Duration): Moment; + + calendar(): string; + calendar(start: Moment): string; + + clone(): Moment; + + /** + * @return Unix timestamp, or milliseconds since the epoch. + */ + valueOf(): number; + + local(): Moment; // current date/time in local mode + + utc(): Moment; // current date/time in UTC mode + + isValid(): boolean; + invalidAt(): number; + + year(y: number): Moment; + year(): number; + quarter(): number; + quarter(q: number): Moment; + month(M: number): Moment; + month(M: string): Moment; + month(): number; + day(d: number): Moment; + day(d: string): Moment; + day(): number; + date(d: number): Moment; + date(): number; + hour(h: number): Moment; + hour(): number; + hours(h: number): Moment; + hours(): number; + minute(m: number): Moment; + minute(): number; + minutes(m: number): Moment; + minutes(): number; + second(s: number): Moment; + second(): number; + seconds(s: number): Moment; + seconds(): number; + millisecond(ms: number): Moment; + millisecond(): number; + milliseconds(ms: number): Moment; + milliseconds(): number; + weekday(): number; + weekday(d: number): Moment; + isoWeekday(): number; + isoWeekday(d: number): Moment; + weekYear(): number; + weekYear(d: number): Moment; + isoWeekYear(): number; + isoWeekYear(d: number): Moment; + week(): number; + week(d: number): Moment; + weeks(): number; + weeks(d: number): Moment; + isoWeek(): number; + isoWeek(d: number): Moment; + isoWeeks(): number; + isoWeeks(d: number): Moment; + weeksInYear(): number; + isoWeeksInYear(): number; + dayOfYear(): number; + dayOfYear(d: number): Moment; + + from(f: Moment|string|number|Date|number[], suffix?: boolean): string; + to(f: Moment|string|number|Date|number[], suffix?: boolean): string; + + diff(b: Moment): number; + diff(b: Moment, unitOfTime: string): number; + diff(b: Moment, unitOfTime: string, round: boolean): number; + + toArray(): number[]; + toDate(): Date; + toISOString(): string; + toJSON(): string; + unix(): number; + + isLeapYear(): boolean; + zone(): number; + zone(b: number): Moment; + zone(b: string): Moment; + utcOffset(): number; + utcOffset(b: number): Moment; + utcOffset(b: string): Moment; + daysInMonth(): number; + isDST(): boolean; + + isBefore(): boolean; + isBefore(b: Moment|string|number|Date|number[], granularity?: string): boolean; + + isAfter(): boolean; + isAfter(b: Moment|string|number|Date|number[], granularity?: string): boolean; + + isSame(b: Moment|string|number|Date|number[], granularity?: string): boolean; + isBetween(a: Moment|string|number|Date|number[], b: Moment|string|number|Date|number[], granularity?: string): boolean; + + // Deprecated as of 2.8.0. + lang(language: string): Moment; + lang(reset: boolean): Moment; + lang(): MomentLanguage; + + locale(language: string): Moment; + locale(reset: boolean): Moment; + locale(): string; + + localeData(language: string): Moment; + localeData(reset: boolean): Moment; + localeData(): MomentLanguage; + + // Deprecated as of 2.7.0. + max(date: Moment|string|number|Date|any[]): Moment; + max(date: string, format: string): Moment; + + // Deprecated as of 2.7.0. 
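// --- Editorial example, not part of the upstream moment-node.d.ts diff ---
// A sketch of a few query/manipulation methods declared above; assumes the `moment` import
// from the earlier sketch.
const a = moment('2015-10-28');
const b = moment('2015-01-01');
a.diff(b, 'days');                         // => 300 (whole days between the two)
a.isAfter(b);                              // => true
a.isSame(b, 'year');                       // => true (same calendar year)
a.startOf('month').format('YYYY-MM-DD');   // => '2015-10-01' (startOf mutates `a`)
// --- end editorial example ---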
+ min(date: Moment|string|number|Date|any[]): Moment; + min(date: string, format: string): Moment; + + get(unit: string): number; + set(unit: string, value: number): Moment; + + } + + interface MomentCalendar { + + lastDay: any; + sameDay: any; + nextDay: any; + lastWeek: any; + nextWeek: any; + sameElse: any; + + } + + interface BaseMomentLanguage { + months ?: any; + monthsShort ?: any; + weekdays ?: any; + weekdaysShort ?: any; + weekdaysMin ?: any; + relativeTime ?: MomentRelativeTime; + meridiem ?: (hour: number, minute: number, isLowercase: boolean) => string; + calendar ?: MomentCalendar; + ordinal ?: (num: number) => string; + } + + interface MomentLanguage extends BaseMomentLanguage { + longDateFormat?: MomentLongDateFormat; + } + + interface MomentLanguageData extends BaseMomentLanguage { + /** + * @param formatType should be L, LL, LLL, LLLL. + */ + longDateFormat(formatType: string): string; + } + + interface MomentLongDateFormat { + + L: string; + LL: string; + LLL: string; + LLLL: string; + LT: string; + l?: string; + ll?: string; + lll?: string; + llll?: string; + lt?: string; + + } + + interface MomentRelativeTime { + + future: any; + past: any; + s: any; + m: any; + mm: any; + h: any; + hh: any; + d: any; + dd: any; + M: any; + MM: any; + y: any; + yy: any; + + } + + interface MomentStatic { + + version: string; + fn: Moment; + + (): Moment; + (date: number): Moment; + (date: number[]): Moment; + (date: string, format?: string, strict?: boolean): Moment; + (date: string, format?: string, language?: string, strict?: boolean): Moment; + (date: string, formats: string[], strict?: boolean): Moment; + (date: string, formats: string[], language?: string, strict?: boolean): Moment; + (date: string, specialFormat: () => void, strict?: boolean): Moment; + (date: string, specialFormat: () => void, language?: string, strict?: boolean): Moment; + (date: string, formatsIncludingSpecial: any[], strict?: boolean): Moment; + (date: string, formatsIncludingSpecial: any[], language?: string, strict?: boolean): Moment; + (date: Date): Moment; + (date: Moment): Moment; + (date: Object): Moment; + + utc(): Moment; + utc(date: number): Moment; + utc(date: number[]): Moment; + utc(date: string, format?: string, strict?: boolean): Moment; + utc(date: string, format?: string, language?: string, strict?: boolean): Moment; + utc(date: string, formats: string[], strict?: boolean): Moment; + utc(date: string, formats: string[], language?: string, strict?: boolean): Moment; + utc(date: Date): Moment; + utc(date: Moment): Moment; + utc(date: Object): Moment; + + unix(timestamp: number): Moment; + + invalid(parsingFlags?: Object): Moment; + isMoment(): boolean; + isMoment(m: any): boolean; + isDate(m: any): boolean; + isDuration(): boolean; + isDuration(d: any): boolean; + + // Deprecated in 2.8.0. 
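// --- Editorial example, not part of the upstream moment-node.d.ts diff ---
// A sketch of the MomentStatic call signatures declared above; assumes the `moment` import
// from the earlier sketch.
moment();                                   // now, in local mode
moment('2015-10-28', 'YYYY-MM-DD', true);   // strict parsing against an explicit format
moment.utc([2015, 9, 28]);                  // from an array, in UTC mode (months are 0-based)
moment.unix(1446076800);                    // from a Unix timestamp in seconds
moment.isMoment(moment());                  // => true
// --- end editorial example ---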
+ lang(language?: string): string; + lang(language?: string, definition?: MomentLanguage): string; + + locale(language?: string): string; + locale(language?: string[]): string; + locale(language?: string, definition?: MomentLanguage): string; + + localeData(language?: string): MomentLanguageData; + + longDateFormat: any; + relativeTime: any; + meridiem: (hour: number, minute: number, isLowercase: boolean) => string; + calendar: any; + ordinal: (num: number) => string; + + duration(milliseconds: Number): Duration; + duration(num: Number, unitOfTime: string): Duration; + duration(input: MomentInput): Duration; + duration(object: any): Duration; + duration(): Duration; + + parseZone(date: string): Moment; + + months(): string[]; + months(index: number): string; + months(format: string): string[]; + months(format: string, index: number): string; + monthsShort(): string[]; + monthsShort(index: number): string; + monthsShort(format: string): string[]; + monthsShort(format: string, index: number): string; + + weekdays(): string[]; + weekdays(index: number): string; + weekdays(format: string): string[]; + weekdays(format: string, index: number): string; + weekdaysShort(): string[]; + weekdaysShort(index: number): string; + weekdaysShort(format: string): string[]; + weekdaysShort(format: string, index: number): string; + weekdaysMin(): string[]; + weekdaysMin(index: number): string; + weekdaysMin(format: string): string[]; + weekdaysMin(format: string, index: number): string; + + min(moments: Moment[]): Moment; + max(moments: Moment[]): Moment; + + normalizeUnits(unit: string): string; + relativeTimeThreshold(threshold: string): number|boolean; + relativeTimeThreshold(threshold: string, limit:number): boolean; + + /** + * Constant used to enable explicit ISO_8601 format parsing. + */ + ISO_8601(): void; + + defaultFormat: string; + + } + +} + +declare module 'moment' { + var moment: moment.MomentStatic; + export = moment; +} diff --git a/public/app/headers/moment/moment.d.ts b/public/app/headers/moment/moment.d.ts new file mode 100644 index 0000000000000..e7afe769fdf75 --- /dev/null +++ b/public/app/headers/moment/moment.d.ts @@ -0,0 +1,7 @@ +// Type definitions for Moment.js 2.8.0 +// Project: https://github.com/timrwood/moment +// Definitions by: Michael Lakerveld , Aaron King , Hiroki Horiuchi , Dick van den Brink , Adi Dahiya , Matt Brooks +// Definitions: https://github.com/borisyankov/DefinitelyTyped + +/// + diff --git a/public/app/headers/require/require.d.ts b/public/app/headers/require/require.d.ts new file mode 100644 index 0000000000000..c42a1bda101f9 --- /dev/null +++ b/public/app/headers/require/require.d.ts @@ -0,0 +1,369 @@ +// Type definitions for RequireJS 2.1.8 +// Project: http://requirejs.org/ +// Definitions by: Josh Baldwin +// Definitions: https://github.com/borisyankov/DefinitelyTyped + +/* +require-2.1.8.d.ts may be freely distributed under the MIT license. 
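// --- Editorial example, not part of the upstream moment-node.d.ts diff ---
// A sketch of durations and locale helpers from the static API declared above; assumes the
// `moment` import from the earlier sketch.
const d = moment.duration(90, 'minutes');
d.asHours();       // => 1.5
d.humanize();      // => '2 hours' (rounded, human-readable)
moment.months();   // => ['January', 'February', ...]
moment.locale();   // => the currently active locale key, e.g. 'en'
// --- end editorial example ---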
+ +Copyright (c) 2013 Josh Baldwin https://github.com/jbaldwin/require.d.ts + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. +*/ + +declare module 'module' { + var mod: { + config: () => any; + id: string; + uri: string; + } + export = mod; +} + +interface RequireError extends Error { + + /** + * The error ID that maps to an ID on a web page. + **/ + requireType: string; + + /** + * Required modules. + **/ + requireModules: string[]; + + /** + * The original error, if there is one (might be null). + **/ + originalError: Error; +} + +interface RequireShim { + + /** + * List of dependencies. + **/ + deps?: string[]; + + /** + * Name the module will be exported as. + **/ + exports?: string; + + /** + * Initialize function with all dependcies passed in, + * if the function returns a value then that value is used + * as the module export value instead of the object + * found via the 'exports' string. + * @param dependencies + * @return + **/ + init?: (...dependencies: any[]) => any; +} + +interface RequireConfig { + + // The root path to use for all module lookups. + baseUrl?: string; + + // Path mappings for module names not found directly under + // baseUrl. + paths?: { [key: string]: any; }; + + // Dictionary of Shim's. + // does not cover case of key->string[] + shim?: { [key: string]: RequireShim; }; + + /** + * For the given module prefix, instead of loading the + * module with the given ID, substitude a different + * module ID. + * + * @example + * requirejs.config({ + * map: { + * 'some/newmodule': { + * 'foo': 'foo1.2' + * }, + * 'some/oldmodule': { + * 'foo': 'foo1.0' + * } + * } + * }); + **/ + map?: { + [id: string]: { + [id: string]: string; + }; + }; + + /** + * AMD configurations, use module.config() to access in + * define() functions + **/ + config?: { [id: string]: {}; }; + + /** + * Configures loading modules from CommonJS packages. + **/ + packages?: {}; + + /** + * The number of seconds to wait before giving up on loading + * a script. The default is 7 seconds. + **/ + waitSeconds?: number; + + /** + * A name to give to a loading context. This allows require.js + * to load multiple versions of modules in a page, as long as + * each top-level require call specifies a unique context string. + **/ + context?: string; + + /** + * An array of dependencies to load. + **/ + deps?: string[]; + + /** + * A function to pass to require that should be require after + * deps have been loaded. 
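// --- Editorial example, not part of the upstream require.d.ts diff ---
// A sketch of a typical requirejs.config() call exercising the RequireConfig fields declared
// above; the concrete paths and shim entries are illustrative, not taken from Grafana's config.
requirejs.config({
  baseUrl: 'public/app',
  paths: {
    lodash: '../vendor/lodash',
    moment: '../vendor/moment',
  },
  shim: {
    'some-legacy-lib': { deps: ['jquery'], exports: 'SomeLegacyLib' },
  },
  waitSeconds: 15,
});
// --- end editorial example ---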
+ * @param modules + **/ + callback?: (...modules: any[]) => void; + + /** + * If set to true, an error will be thrown if a script loads + * that does not call define() or have shim exports string + * value that can be checked. + **/ + enforceDefine?: boolean; + + /** + * If set to true, document.createElementNS() will be used + * to create script elements. + **/ + xhtml?: boolean; + + /** + * Extra query string arguments appended to URLs that RequireJS + * uses to fetch resources. Most useful to cachce bust when + * the browser or server is not configured correcty. + * + * @example + * urlArgs: "bust= + (new Date()).getTime() + **/ + urlArgs?: string; + + /** + * Specify the value for the type="" attribute used for script + * tags inserted into the document by RequireJS. Default is + * "text/javascript". To use Firefox's JavasScript 1.8 + * features, use "text/javascript;version=1.8". + **/ + scriptType?: string; + +} + +// todo: not sure what to do with this guy +interface RequireModule { + + /** + * + **/ + config(): {}; + +} + +/** +* +**/ +interface RequireMap { + + /** + * + **/ + prefix: string; + + /** + * + **/ + name: string; + + /** + * + **/ + parentMap: RequireMap; + + /** + * + **/ + url: string; + + /** + * + **/ + originalName: string; + + /** + * + **/ + fullName: string; +} + +interface Require { + + /** + * Configure require.js + **/ + config(config: RequireConfig): Require; + + /** + * CommonJS require call + * @param module Module to load + * @return The loaded module + */ + (module: string): any; + + /** + * Start the main app logic. + * Callback is optional. + * Can alternatively use deps and callback. + * @param modules Required modules to load. + **/ + (modules: string[]): void; + + /** + * @see Require() + * @param ready Called when required modules are ready. + **/ + (modules: string[], ready: Function): void; + + /** + * @see http://requirejs.org/docs/api.html#errbacks + * @param ready Called when required modules are ready. + **/ + (modules: string[], ready: Function, errback: Function): void; + + /** + * Generate URLs from require module + * @param module Module to URL + * @return URL string + **/ + toUrl(module: string): string; + + /** + * Returns true if the module has already been loaded and defined. + * @param module Module to check + **/ + defined(module: string): boolean; + + /** + * Returns true if the module has already been requested or is in the process of loading and should be available at some point. + * @param module Module to check + **/ + specified(module: string): boolean; + + /** + * On Error override + * @param err + **/ + onError(err: RequireError, errback?: (err: RequireError) => void): void; + + /** + * Undefine a module + * @param module Module to undefine. + **/ + undef(module: string): void; + + /** + * Semi-private function, overload in special instance of undef() + **/ + onResourceLoad(context: Object, map: RequireMap, depArray: RequireMap[]): void; +} + +interface RequireDefine { + + /** + * Define Simple Name/Value Pairs + * @param config Dictionary of Named/Value pairs for the config. + **/ + (config: { [key: string]: any; }): void; + + /** + * Define function. + * @param func: The function module. + **/ + (func: () => any): void; + + /** + * Define function with dependencies. + * @param deps List of dependencies module IDs. + * @param ready Callback function when the dependencies are loaded. 
+ * callback param deps module dependencies + * callback return module definition + **/ + (deps: string[], ready: Function): void; + + /** + * Define module with simplified CommonJS wrapper. + * @param ready + * callback require requirejs instance + * callback exports exports object + * callback module module + * callback return module definition + **/ + (ready: (require: Require, exports: { [key: string]: any; }, module: RequireModule) => any): void; + + /** + * Define a module with a name and dependencies. + * @param name The name of the module. + * @param deps List of dependencies module IDs. + * @param ready Callback function when the dependencies are loaded. + * callback deps module dependencies + * callback return module definition + **/ + (name: string, deps: string[], ready: Function): void; + + /** + * Define a module with a name. + * @param name The name of the module. + * @param ready Callback function when the dependencies are loaded. + * callback return module definition + **/ + (name: string, ready: Function): void; + + /** + * Used to allow a clear indicator that a global define function (as needed for script src browser loading) conforms + * to the AMD API, any global define function SHOULD have a property called "amd" whose value is an object. + * This helps avoid conflict with any other existing JavaScript code that could have defined a define() function + * that does not conform to the AMD API. + * define.amd.jQuery is specific to jQuery and indicates that the loader is able to account for multiple version + * of jQuery being loaded simultaneously. + */ + amd: Object; +} + +// Ambient declarations for 'require' and 'define' +declare var requirejs: Require; +declare var require: Require; +declare var define: RequireDefine; diff --git a/public/app/panels/dashlist/editor.html b/public/app/panels/dashlist/editor.html index 7b176b7431768..1a0ba518c017e 100644 --- a/public/app/panels/dashlist/editor.html +++ b/public/app/panels/dashlist/editor.html @@ -1,7 +1,7 @@
    -
    +
    • Mode @@ -14,7 +14,7 @@
    -
    +
    • Search options @@ -41,7 +41,7 @@
      -
      +
      • Limit number to diff --git a/public/app/panels/dashlist/module.js b/public/app/panels/dashlist/module.js index 3e7c8c5587c31..d76664eb5c3d5 100644 --- a/public/app/panels/dashlist/module.js +++ b/public/app/panels/dashlist/module.js @@ -1,9 +1,9 @@ define([ 'angular', - 'app', + 'app/app', 'lodash', - 'config', - 'components/panelmeta', + 'app/core/config', + 'app/features/panel/panel_meta', ], function (angular, app, _, config, PanelMeta) { 'use strict'; @@ -43,8 +43,10 @@ function (angular, app, _, config, PanelMeta) { $scope.init = function() { panelSrv.init($scope); + if ($scope.panel.tag) { $scope.panel.tags = [$scope.panel.tag]; + delete $scope.panel.tag; } if ($scope.isNewPanel()) { diff --git a/public/app/panels/graph/axisEditor.html b/public/app/panels/graph/axisEditor.html index d161330f5c0d9..d8a537dda46a1 100644 --- a/public/app/panels/graph/axisEditor.html +++ b/public/app/panels/graph/axisEditor.html @@ -4,9 +4,9 @@
        • - Left Y + Left Y
        • -
        • +
        • Unit
        • -
        • -    Grid Max -
        • -
        • - -
        • -
        • - Min -
        • -
        • - -
        • Scale type
        • @@ -49,32 +33,40 @@
          • - Right Y -
          • -
          • - Unit -
          • -
          • -    Grid Max + Y-Max
          • -
          • - Min +
          • + Y-Min
          • -
          • +
          +
          +
          +
          +
            +
          • + Right Y +
          • +
          • + Unit +
          • +
          • Scale type
          • @@ -91,13 +83,38 @@
          +
          +
            +
          • + +
          • +
          • + Y-Max +
          • +
          • + +
          • +
          • + Y-Min +
          • +
          • + +
          • +
          +
          +
          +
        • - Show Axis + Show Axis
        • X-Axis  @@ -114,10 +131,10 @@
        -
        +
        • - Thresholds + Thresholds
        • Level 1 @@ -140,11 +157,7 @@
        • - Line mode  - - - +
        @@ -154,82 +167,60 @@
        -
        +
          -
        • - Legend +
        • + Legend +
        • +
        • +
        • - Show  - - +
        • - Table  - - + +
        • +
        +
        +
        +
        +
          +
        • + Hide series
        • - Right side  - - +
        • - Hide empty  - - +
        -
        -
        -
        + +
          -
        • - Legend values +
        • + Values
        • - Min  - - +
        • - Max  - - +
        • - Avg  - - +
        • - Current  - - +
        • -
        • - Total  - - +
        • +
        • -
        -
        -
        -
        -
          -
        • - Decimals +
        • + Decimals
        - -
        diff --git a/public/app/panels/graph/graph.js b/public/app/panels/graph/graph.js index 0f6ae7baeebd6..31c4e541f4024 100755 --- a/public/app/panels/graph/graph.js +++ b/public/app/panels/graph/graph.js @@ -1,9 +1,9 @@ define([ 'angular', 'jquery', - 'kbn', 'moment', 'lodash', + 'app/core/utils/kbn', './graph.tooltip', 'jquery.flot', 'jquery.flot.events', @@ -14,7 +14,7 @@ define([ 'jquery.flot.fillbelow', 'jquery.flot.crosshair' ], -function (angular, $, kbn, moment, _, GraphTooltip) { +function (angular, $, moment, _, kbn, GraphTooltip) { 'use strict'; var module = angular.module('grafana.directives'); @@ -227,7 +227,7 @@ function (angular, $, kbn, moment, _, GraphTooltip) { for (var i = 0; i < data.length; i++) { var series = data[i]; series.applySeriesOverrides(panel.seriesOverrides); - series.data = series.getFlotPairs(panel.nullPointMode, panel.y_formats); + series.data = series.getFlotPairs(series.nullPointMode || panel.nullPointMode, panel.y_formats); // if hidden remove points and disable stack if (scope.hiddenSeries[series.alias]) { @@ -285,8 +285,8 @@ function (angular, $, kbn, moment, _, GraphTooltip) { function addTimeAxis(options) { var ticks = elem.width() / 100; - var min = _.isUndefined(scope.range.from) ? null : scope.range.from.getTime(); - var max = _.isUndefined(scope.range.to) ? null : scope.range.to.getTime(); + var min = _.isUndefined(scope.range.from) ? null : scope.range.from.valueOf(); + var max = _.isUndefined(scope.range.to) ? null : scope.range.to.valueOf(); options.xaxis = { timezone: dashboard.timezone, @@ -530,8 +530,8 @@ function (angular, $, kbn, moment, _, GraphTooltip) { elem.bind("plotselected", function (event, ranges) { scope.$apply(function() { timeSrv.setTime({ - from : moment.utc(ranges.xaxis.from).toDate(), - to : moment.utc(ranges.xaxis.to).toDate(), + from : moment.utc(ranges.xaxis.from), + to : moment.utc(ranges.xaxis.to), }); }); }); diff --git a/public/app/panels/graph/graph.tooltip.js b/public/app/panels/graph/graph.tooltip.js index 33c29e4e14ac3..c56d7ce8c3037 100644 --- a/public/app/panels/graph/graph.tooltip.js +++ b/public/app/panels/graph/graph.tooltip.js @@ -52,12 +52,19 @@ function ($) { continue; } + if (!series.data.length || (scope.panel.legend.hideZero && series.allIsZero)) { + results.push({ hidden: true }); + continue; + } + hoverIndex = this.findHoverIndexFromData(pos.x, series); results.time = series.data[hoverIndex][0]; - if (scope.panel.stack) { + if (series.stack) { if (scope.panel.tooltip.value_type === 'individual') { value = series.data[hoverIndex][1]; + } else if (!series.stack) { + value = series.data[hoverIndex][1]; } else { last_value += series.data[hoverIndex][1]; value = last_value; @@ -67,7 +74,7 @@ function ($) { } // Highlighting multiple Points depending on the plot type - if (scope.panel.steppedLine || (scope.panel.stack && scope.panel.nullPointMode == "null")) { + if (series.lines.steps || series.stack) { // stacked and steppedLine plots can have series with different length. // Stacked series can increase its length on each new stacked serie if null points found, // to speed the index search we begin always on the last found hoverIndex. 
diff --git a/public/app/panels/graph/legend.js b/public/app/panels/graph/legend.js
index beea27ec8b7c4..8604dff4f08e3 100644
--- a/public/app/panels/graph/legend.js
+++ b/public/app/panels/graph/legend.js
@@ -1,13 +1,11 @@
 define([
   'angular',
-  'app',
   'lodash',
-  'kbn',
   'jquery',
   'jquery.flot',
   'jquery.flot.time',
 ],
-function (angular, app, _, kbn, $) {
+function (angular, _, $) {
   'use strict';

   var module = angular.module('grafana.panels.graph');
@@ -35,7 +33,12 @@ function (angular, app, _, kbn, $) {
     }

     function openColorSelector(e) {
-      var el = $(e.currentTarget);
+      // if we clicked inside popup container ignore click
+      if ($(e.target).parents('.popover').length) {
+        return;
+      }
+
+      var el = $(e.currentTarget).find('.fa-minus');
       var index = getSeriesIndexForElement(el);
       var seriesInfo = seriesList[index];
       var popoverScope = scope.$new();
@@ -134,6 +137,10 @@ function (angular, app, _, kbn, $) {
       if (!series.legend) {
         continue;
       }
+      // ignore zero series
+      if (panel.legend.hideZero && series.allIsZero) {
+        continue;
+      }
       var html = '
        - - - -
        No datapoints No datapoints returned from metric query diff --git a/public/app/panels/graph/module.js b/public/app/panels/graph/module.js index ea0a1e958ad81..ff9633d0576ba 100644 --- a/public/app/panels/graph/module.js +++ b/public/app/panels/graph/module.js @@ -1,17 +1,15 @@ define([ 'angular', - 'app', - 'jquery', 'lodash', - 'kbn', 'moment', - 'components/timeSeries', - 'components/panelmeta', + 'app/core/utils/kbn', + 'app/core/time_series', + 'app/features/panel/panel_meta', './seriesOverridesCtrl', './graph', './legend', ], -function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { +function (angular, _, moment, kbn, TimeSeries, PanelMeta) { 'use strict'; var module = angular.module('grafana.panels.graph'); @@ -43,9 +41,9 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { var _d = { // datasource name, null = default datasource datasource: null, - // sets client side (flot) or native graphite png renderer (png) + // sets client side (flot) or native graphite png renderer (png) renderer: 'flot', - // Show/hide the x-axis + // Show/hide the x-axis 'x-axis' : true, // Show/hide y-axis 'y-axis' : true, @@ -130,7 +128,7 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { $scope.refreshData = function(datasource) { panelHelper.updateTimeRange($scope); - $scope.annotationsPromise = annotationsSrv.getAnnotations($scope.rangeUnparsed, $scope.dashboard); + $scope.annotationsPromise = annotationsSrv.getAnnotations($scope.dashboard); return panelHelper.issueMetricQuery($scope, datasource) .then($scope.dataHandler, function(err) { @@ -180,8 +178,8 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { var series = new TimeSeries({ datapoints: datapoints, - alias: alias, - color: color, + alias: alias, + color: color, }); if (datapoints && datapoints.length > 0) { diff --git a/public/app/panels/graph/seriesOverridesCtrl.js b/public/app/panels/graph/seriesOverridesCtrl.js index 3ded0c9ffdb94..fd5c11b2060f0 100644 --- a/public/app/panels/graph/seriesOverridesCtrl.js +++ b/public/app/panels/graph/seriesOverridesCtrl.js @@ -1,7 +1,7 @@ define([ 'angular', 'jquery', - 'app', + 'app/app', 'lodash', ], function(angular, jquery, app, _) { 'use strict'; @@ -95,6 +95,7 @@ define([ $scope.addOverrideOption('Lines', 'lines', [true, false]); $scope.addOverrideOption('Line fill', 'fill', [0,1,2,3,4,5,6,7,8,9,10]); $scope.addOverrideOption('Line width', 'linewidth', [0,1,2,3,4,5,6,7,8,9,10]); + $scope.addOverrideOption('Null point mode', 'nullPointMode', ['connected', 'null', 'null as zero']); $scope.addOverrideOption('Fill below to', 'fillBelowTo', $scope.getSeriesNames()); $scope.addOverrideOption('Staircase line', 'steppedLine', [true, false]); $scope.addOverrideOption('Points', 'points', [true, false]); diff --git a/public/app/panels/graph/styleEditor.html b/public/app/panels/graph/styleEditor.html index 5d5f2fd740110..f692328a0fdc9 100644 --- a/public/app/panels/graph/styleEditor.html +++ b/public/app/panels/graph/styleEditor.html @@ -63,7 +63,7 @@
        Tooltip
Series specific overrides. Regex match example: /server[0-3]/i
        -
        +
        • diff --git a/public/app/panels/singlestat/editor.html b/public/app/panels/singlestat/editor.html index 6b055e68bb414..f8df6d8adc912 100644 --- a/public/app/panels/singlestat/editor.html +++ b/public/app/panels/singlestat/editor.html @@ -1,5 +1,5 @@
          -
          +
          • @@ -68,7 +68,7 @@ Decimals
          • -
          @@ -79,7 +79,7 @@
          -
          +
          • Coloring @@ -121,7 +121,7 @@
            -
            +
            • Spark lines @@ -147,7 +147,7 @@
            • Fill Color
            • -
            • +
            @@ -158,7 +158,7 @@
            -
            +
            • Value to text mapping @@ -177,7 +177,7 @@
            • - +
            • diff --git a/public/app/panels/singlestat/module.js b/public/app/panels/singlestat/module.js index b38912605bbaf..18d02e166f167 100644 --- a/public/app/panels/singlestat/module.js +++ b/public/app/panels/singlestat/module.js @@ -1,13 +1,13 @@ define([ 'angular', - 'app', + 'app/app', 'lodash', - 'components/timeSeries', - 'kbn', - 'components/panelmeta', + 'app/core/utils/kbn', + 'app/core/time_series', + 'app/features/panel/panel_meta', './singleStatPanel', ], -function (angular, app, _, TimeSeries, kbn, PanelMeta) { +function (angular, app, _, kbn, TimeSeries, PanelMeta) { 'use strict'; var module = angular.module('grafana.panels.singlestat'); @@ -37,6 +37,7 @@ function (angular, app, _, TimeSeries, kbn, PanelMeta) { // Set and populate defaults var _d = { links: [], + datasource: null, maxDataPoints: 100, interval: null, targets: [{}], @@ -185,8 +186,18 @@ function (angular, app, _, TimeSeries, kbn, PanelMeta) { $scope.setValues = function(data) { data.flotpairs = []; + if($scope.series.length > 1) { + $scope.inspector.error = new Error(); + $scope.inspector.error.message = 'Multiple Series Error'; + $scope.inspector.error.data = 'Metric query returns ' + $scope.series.length + + ' series. Single Stat Panel expects a single series.\n\nResponse:\n'+JSON.stringify($scope.series); + throw $scope.inspector.error; + } + if ($scope.series && $scope.series.length > 0) { - var lastValue = _.last($scope.series[0].datapoints)[0]; + var lastPoint = _.last($scope.series[0].datapoints); + var lastValue = _.isArray(lastPoint) ? lastPoint[0] : null; + if (_.isString(lastValue)) { data.value = 0; data.valueFormated = lastValue; diff --git a/public/app/panels/singlestat/singleStatPanel.js b/public/app/panels/singlestat/singleStatPanel.js index 2463ca1e38e8f..b04f1c57dcd2e 100644 --- a/public/app/panels/singlestat/singleStatPanel.js +++ b/public/app/panels/singlestat/singleStatPanel.js @@ -1,6 +1,6 @@ define([ 'angular', - 'app', + 'app/app', 'lodash', 'jquery', 'jquery.flot', @@ -15,7 +15,7 @@ function (angular, app, _, $) { return { link: function(scope, elem) { - var data, panel; + var data, panel, linkInfo; var $panelContainer = elem.parents('.panel-container'); scope.$on('render', function() { @@ -122,8 +122,8 @@ function (angular, app, _, $) { xaxis: { show: false, mode: "time", - min: scope.range.from.getTime(), - max: scope.range.to.getTime(), + min: scope.range.from.valueOf(), + max: scope.range.to.valueOf(), }, grid: { hoverable: false, show: false }, }; @@ -170,10 +170,16 @@ function (angular, app, _, $) { } elem.toggleClass('pointer', panel.links.length > 0); + + if (panel.links.length > 0) { + linkInfo = linkSrv.getPanelLinkAnchorInfo(panel.links[0], scope.panel.scopedVars); + } else { + linkInfo = null; + } } // drilldown link tooltip - var drilldownTooltip = $('
              gello
              "'); + var drilldownTooltip = $('
              hello
              "'); elem.mouseleave(function() { if (panel.links.length === 0) { return;} @@ -181,10 +187,9 @@ function (angular, app, _, $) { }); elem.click(function() { - if (panel.links.length === 0) { return; } - var link = panel.links[0]; - var linkInfo = linkSrv.getPanelLinkAnchorInfo(link); - if (panel.links[0].targetBlank) { + if (!linkInfo) { return; } + + if (linkInfo.target === '_blank') { var redirectWindow = window.open(linkInfo.href, '_blank'); redirectWindow.location; return; @@ -202,9 +207,9 @@ function (angular, app, _, $) { }); elem.mousemove(function(e) { - if (panel.links.length === 0) { return;} + if (!linkInfo) { return;} - drilldownTooltip.text('click to go to: ' + panel.links[0].title); + drilldownTooltip.text('click to go to: ' + linkInfo.title); drilldownTooltip.place_tt(e.pageX+20, e.pageY-15); }); diff --git a/public/app/panels/table/controller.ts b/public/app/panels/table/controller.ts new file mode 100644 index 0000000000000..09e77108631d1 --- /dev/null +++ b/public/app/panels/table/controller.ts @@ -0,0 +1,115 @@ +/// + +import angular = require('angular'); +import _ = require('lodash'); +import moment = require('moment'); +import PanelMeta = require('app/features/panel/panel_meta'); + +import {TableModel} from './table_model'; + +export class TablePanelCtrl { + + /** @ngInject */ + constructor($scope, $rootScope, $q, panelSrv, panelHelper, annotationsSrv) { + $scope.ctrl = this; + $scope.pageIndex = 0; + + $scope.panelMeta = new PanelMeta({ + panelName: 'Table', + editIcon: "fa fa-table", + fullscreen: true, + metricsEditor: true, + }); + + $scope.panelMeta.addEditorTab('Options', 'app/panels/table/options.html'); + $scope.panelMeta.addEditorTab('Time range', 'app/features/panel/partials/panelTime.html'); + + var panelDefaults = { + targets: [{}], + transform: 'timeseries_to_rows', + pageSize: null, + showHeader: true, + styles: [ + { + type: 'date', + pattern: 'Time', + dateFormat: 'YYYY-MM-DD HH:mm:ss', + }, + { + unit: 'short', + type: 'number', + decimals: 2, + colors: ["rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)"], + colorMode: null, + pattern: '/.*/', + thresholds: [], + } + ], + columns: [], + scroll: true, + fontSize: '100%', + sort: {col: 0, desc: true}, + }; + + $scope.init = function() { + if ($scope.panel.styles === void 0) { + $scope.panel.styles = $scope.panel.columns; + $scope.panel.columns = $scope.panel.fields; + delete $scope.panel.columns; + delete $scope.panel.fields; + } + + _.defaults($scope.panel, panelDefaults); + panelSrv.init($scope); + }; + + $scope.refreshData = function(datasource) { + panelHelper.updateTimeRange($scope); + + $scope.pageIndex = 0; + + if ($scope.panel.transform === 'annotations') { + return annotationsSrv.getAnnotations($scope.dashboard).then(annotations => { + $scope.dataRaw = annotations; + $scope.render(); + }); + } + + return panelHelper.issueMetricQuery($scope, datasource) + .then($scope.dataHandler, function(err) { + $scope.render(); + throw err; + }); + }; + + $scope.toggleColumnSort = function(col, colIndex) { + if ($scope.panel.sort.col === colIndex) { + if ($scope.panel.sort.desc) { + $scope.panel.sort.desc = false; + } else { + $scope.panel.sort.col = null; + } + } else { + $scope.panel.sort.col = colIndex; + $scope.panel.sort.desc = true; + } + + $scope.render(); + }; + + $scope.dataHandler = function(results) { + $scope.dataRaw = results.data; + $scope.pageIndex = 0; + $scope.render(); + }; + + $scope.render = function() { + $scope.table = 
TableModel.transform($scope.dataRaw, $scope.panel); + $scope.table.sort($scope.panel.sort); + panelHelper.broadcastRender($scope, $scope.table, $scope.dataRaw); + }; + + $scope.init(); + } +} + diff --git a/public/app/panels/table/editor.html b/public/app/panels/table/editor.html new file mode 100644 index 0000000000000..56b49a8756797 --- /dev/null +++ b/public/app/panels/table/editor.html @@ -0,0 +1,165 @@ +
              +
              +
              Data
              +
              +
              +
                +
              • + To Table Transform +
              • +
              • + +
              • +
              +
              +
              +
              +
                +
              • + Columns +
              • +
              • + + + {{column.text}} + +
              • +
              • + +
              • +
              +
              +
              +
              +
              + +
              +
              Table Display
              +
              +
              +
                +
              • + Pagination (Page size) +
              • +
              • + +
              • +
              • + +
              • +
              • + Font size +
              • +
              • + +
              • +
              +
              +
              +
              +
              +
              + +
              +
              Column Styles
              + +
              +
              +
              +
                +
              • + +
              • +
              + +
                +
              • + Name or regex +
              • +
              • + +
              • +
              • + Type +
              • +
              • + +
              • +
              +
                +
              • + Format +
              • +
              • + +
              • +
              +
              +
              +
              +
                +
              • + Coloring +
              • +
              • + +
              • +
• + Thresholds Comma separated values +
              • +
              • + +
              • +
              • + Colors +
              • +
              • + + + +
              • +
              +
              +
              +
              +
                +
              • + Unit +
              • + +
              • + Decimals +
              • +
              • + +
              • +
              +
              +
              + +
              +
              + + +
              + diff --git a/public/app/panels/table/editor.ts b/public/app/panels/table/editor.ts new file mode 100644 index 0000000000000..e41ff422800f5 --- /dev/null +++ b/public/app/panels/table/editor.ts @@ -0,0 +1,111 @@ +/// + + +import angular = require('angular'); +import $ = require('jquery'); +import _ = require('lodash'); +import kbn = require('app/core/utils/kbn'); +import moment = require('moment'); + +import {transformers} from './transformers'; + +export class TablePanelEditorCtrl { + + /** @ngInject */ + constructor($scope, $q, uiSegmentSrv) { + $scope.transformers = transformers; + $scope.unitFormats = kbn.getUnitFormats(); + $scope.colorModes = [ + {text: 'Disabled', value: null}, + {text: 'Cell', value: 'cell'}, + {text: 'Value', value: 'value'}, + {text: 'Row', value: 'row'}, + ]; + $scope.columnTypes = [ + {text: 'Number', value: 'number'}, + {text: 'String', value: 'string'}, + {text: 'Date', value: 'date'}, + ]; + $scope.fontSizes = ['80%', '90%', '100%', '110%', '120%', '130%', '150%', '160%', '180%', '200%', '220%', '250%']; + $scope.dateFormats = [ + {text: 'YYYY-MM-DD HH:mm:ss', value: 'YYYY-MM-DD HH:mm:ss'}, + {text: 'MM/DD/YY h:mm:ss a', value: 'MM/DD/YY h:mm:ss a'}, + {text: 'MMMM D, YYYY LT', value: 'MMMM D, YYYY LT'}, + ]; + + $scope.addColumnSegment = uiSegmentSrv.newPlusButton(); + + $scope.getColumnOptions = function() { + if (!$scope.dataRaw) { + return $q.when([]); + } + var columns = transformers[$scope.panel.transform].getColumns($scope.dataRaw); + var segments = _.map(columns, (c: any) => uiSegmentSrv.newSegment({value: c.text})); + return $q.when(segments); + }; + + $scope.addColumn = function() { + $scope.panel.columns.push({text: $scope.addColumnSegment.value, value: $scope.addColumnSegment.value}); + $scope.render(); + + var plusButton = uiSegmentSrv.newPlusButton(); + $scope.addColumnSegment.html = plusButton.html; + }; + + $scope.transformChanged = function() { + $scope.panel.columns = []; + $scope.render(); + }; + + $scope.removeColumn = function(column) { + $scope.panel.columns = _.without($scope.panel.columns, column); + $scope.render(); + }; + + $scope.setUnitFormat = function(column, subItem) { + column.unit = subItem.value; + $scope.render(); + }; + + $scope.addColumnStyle = function() { + var columnStyleDefaults = { + unit: 'short', + type: 'number', + decimals: 2, + colors: ["rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)"], + colorMode: null, + pattern: '/.*/', + dateFormat: 'YYYY-MM-DD HH:mm:ss', + thresholds: [], + }; + + $scope.panel.styles.push(angular.copy(columnStyleDefaults)); + }; + + $scope.removeColumnStyle = function(style) { + $scope.panel.styles = _.without($scope.panel.styles, style); + }; + + $scope.getColumnNames = function() { + if (!$scope.table) { + return []; + } + return _.map($scope.table.columns, function(col: any) { + return col.text; + }); + }; + + } +} + + +export function tablePanelEditor($q, uiSegmentSrv) { + 'use strict'; + return { + restrict: 'E', + scope: true, + templateUrl: 'app/panels/table/editor.html', + controller: TablePanelEditorCtrl, + }; +} + diff --git a/public/app/panels/table/module.html b/public/app/panels/table/module.html new file mode 100644 index 0000000000000..75b9b4ce1369a --- /dev/null +++ b/public/app/panels/table/module.html @@ -0,0 +1,28 @@ +
              + +
              +
              +
              + + + + + + + + +
              +
              + {{col.text}} + + + + +
              +
              +
              +
              + +
              +
              diff --git a/public/app/panels/table/module.ts b/public/app/panels/table/module.ts new file mode 100644 index 0000000000000..a1cfdef211e91 --- /dev/null +++ b/public/app/panels/table/module.ts @@ -0,0 +1,106 @@ +/// + +import angular = require('angular'); +import $ = require('jquery'); +import _ = require('lodash'); +import kbn = require('app/core/utils/kbn'); +import moment = require('moment'); + +import {TablePanelCtrl} from './controller'; +import {TableRenderer} from './renderer'; +import {tablePanelEditor} from './editor'; + +export function tablePanel() { + 'use strict'; + return { + restrict: 'E', + templateUrl: 'app/panels/table/module.html', + controller: TablePanelCtrl, + link: function(scope, elem) { + var data; + var panel = scope.panel; + var pageCount = 0; + var formaters = []; + + function getTableHeight() { + var panelHeight = scope.height || scope.panel.height || scope.row.height; + if (_.isString(panelHeight)) { + panelHeight = parseInt(panelHeight.replace('px', ''), 10); + } + if (pageCount > 1) { + panelHeight -= 28; + } + + return (panelHeight - 60) + 'px'; + } + + function appendTableRows(tbodyElem) { + var renderer = new TableRenderer(panel, data, scope.dashboard.timezone); + tbodyElem.empty(); + tbodyElem.html(renderer.render(scope.pageIndex)); + } + + function switchPage(e) { + var el = $(e.currentTarget); + scope.pageIndex = (parseInt(el.text(), 10)-1); + renderPanel(); + } + + function appendPaginationControls(footerElem) { + footerElem.empty(); + + var pageSize = panel.pageSize || 100; + pageCount = Math.ceil(data.rows.length / pageSize); + if (pageCount === 1) { + return; + } + + var startPage = Math.max(scope.pageIndex - 3, 0); + var endPage = Math.min(pageCount, startPage + 9); + + var paginationList = $('
                '); + + for (var i = startPage; i < endPage; i++) { + var activeClass = i === scope.pageIndex ? 'active' : ''; + var pageLinkElem = $('
              • ' + (i+1) + '
              • '); + paginationList.append(pageLinkElem); + } + + footerElem.append(paginationList); + } + + function renderPanel() { + var container = elem.find('.table-panel-container'); + var rootElem = elem.find('.table-panel-scroll'); + var tbodyElem = elem.find('tbody'); + var footerElem = elem.find('.table-panel-footer'); + + appendTableRows(tbodyElem); + + container.css({'font-size': panel.fontSize}); + appendPaginationControls(footerElem); + + rootElem.css({'max-height': panel.scroll ? getTableHeight() : '' }); + } + + elem.on('click', '.table-panel-page-link', switchPage); + + scope.$on('$destroy', function() { + elem.off('click', '.table-panel-page-link'); + }); + + scope.$on('render', function(event, renderData) { + data = renderData || data; + if (!data) { + scope.get_data(); + return; + } + + renderPanel(); + }); + } + }; +} + +angular.module('grafana.directives').directive('grafanaPanelTable', tablePanel); +angular.module('grafana.directives').directive('grafanaPanelTableEditor', tablePanelEditor); diff --git a/public/app/panels/table/options.html b/public/app/panels/table/options.html new file mode 100644 index 0000000000000..d43ff958c5d5b --- /dev/null +++ b/public/app/panels/table/options.html @@ -0,0 +1,2 @@ + + diff --git a/public/app/panels/table/renderer.ts b/public/app/panels/table/renderer.ts new file mode 100644 index 0000000000000..a55a8bf2face1 --- /dev/null +++ b/public/app/panels/table/renderer.ts @@ -0,0 +1,145 @@ +/// + +import _ = require('lodash'); +import kbn = require('app/core/utils/kbn'); +import moment = require('moment'); + +export class TableRenderer { + formaters: any[]; + colorState: any; + + constructor(private panel, private table, private timezone) { + this.formaters = []; + this.colorState = {}; + } + + getColorForValue(value, style) { + if (!style.thresholds) { return null; } + + for (var i = style.thresholds.length - 1; i >= 0 ; i--) { + if (value >= style.thresholds[i]) { + return style.colors[i]; + } + } + return null; + } + + defaultCellFormater(v) { + if (v === null || v === void 0) { + return ''; + } + + if (_.isArray(v)) { + v = v.join(', '); + } + + return v; + } + + + createColumnFormater(style) { + if (!style) { + return this.defaultCellFormater; + } + + if (style.type === 'date') { + return v => { + if (_.isArray(v)) { v = v[0]; } + var date = moment(v); + if (this.timezone === 'utc') { + date = date.utc(); + } + return date.format(style.dateFormat); + }; + } + + if (style.type === 'number') { + let valueFormater = kbn.valueFormats[style.unit]; + + return v => { + if (v === null || v === void 0) { + return '-'; + } + + if (_.isString(v)) { + return v; + } + + if (style.colorMode) { + this.colorState[style.colorMode] = this.getColorForValue(v, style); + } + + return valueFormater(v, style.decimals, null); + }; + } + + return this.defaultCellFormater; + } + + formatColumnValue(colIndex, value) { + if (this.formaters[colIndex]) { + return this.formaters[colIndex](value); + } + + for (let i = 0; i < this.panel.styles.length; i++) { + let style = this.panel.styles[i]; + let column = this.table.columns[colIndex]; + var regex = kbn.stringToJsRegex(style.pattern); + if (column.text.match(regex)) { + this.formaters[colIndex] = this.createColumnFormater(style); + return this.formaters[colIndex](value); + } + } + + this.formaters[colIndex] = this.defaultCellFormater; + return this.formaters[colIndex](value); + } + + renderCell(columnIndex, value, addWidthHack = false) { + var value = this.formatColumnValue(columnIndex, value); + var style = 
''; + if (this.colorState.cell) { + style = ' style="background-color:' + this.colorState.cell + ';color: white"'; + this.colorState.cell = null; + } + else if (this.colorState.value) { + style = ' style="color:' + this.colorState.value + '"'; + this.colorState.value = null; + } + + // because of the fixed table headers css only solution + // there is an issue if header cell is wider the cell + // this hack adds header content to cell (not visible) + var widthHack = ''; + if (addWidthHack) { + widthHack = '
                ' + this.table.columns[columnIndex].text + '
                '; + } + + return '' + value + widthHack + ''; + } + + render(page) { + let pageSize = this.panel.pageSize || 100; + let startPos = page * pageSize; + let endPos = Math.min(startPos + pageSize, this.table.rows.length); + var html = ""; + + for (var y = startPos; y < endPos; y++) { + let row = this.table.rows[y]; + let cellHtml = ''; + let rowStyle = ''; + for (var i = 0; i < this.table.columns.length; i++) { + cellHtml += this.renderCell(i, row[i], y === startPos); + } + + if (this.colorState.row) { + rowStyle = ' style="background-color:' + this.colorState.row + ';color: white"'; + this.colorState.row = null; + } + + html += '' + cellHtml + ''; + } + + return html; + } +} diff --git a/public/app/panels/table/specs/renderer_specs.ts b/public/app/panels/table/specs/renderer_specs.ts new file mode 100644 index 0000000000000..f8fdebb9ab076 --- /dev/null +++ b/public/app/panels/table/specs/renderer_specs.ts @@ -0,0 +1,71 @@ +import {describe, beforeEach, it, sinon, expect} from 'test/lib/common'; + +import {TableModel} from '../table_model'; +import {TableRenderer} from '../renderer'; + +describe('when rendering table', () => { + describe('given 2 columns', () => { + var table = new TableModel(); + table.columns = [ + {text: 'Time'}, + {text: 'Value'}, + {text: 'Colored'}, + {text: 'Undefined'}, + ]; + + var panel = { + pageSize: 10, + styles: [ + { + pattern: 'Time', + type: 'date', + format: 'LLL' + }, + { + pattern: 'Value', + type: 'number', + unit: 'ms', + decimals: 3, + }, + { + pattern: 'Colored', + type: 'number', + unit: 'none', + decimals: 1, + colorMode: 'value', + thresholds: [0, 50, 80], + colors: ['green', 'orange', 'red'] + } + ] + }; + + var renderer = new TableRenderer(panel, table, 'utc'); + + it('time column should be formated', () => { + var html = renderer.renderCell(0, 1388556366666); + expect(html).to.be('2014-01-01T06:06:06+00:00'); + }); + + it('number column should be formated', () => { + var html = renderer.renderCell(1, 1230); + expect(html).to.be('1.230 s'); + }); + + it('number style should ignore string values', () => { + var html = renderer.renderCell(1, 'asd'); + expect(html).to.be('asd'); + }); + + it('colored cell should have style', () => { + var html = renderer.renderCell(2, 55); + expect(html).to.be('55.0'); + }); + + it('unformated undefined should be rendered as -', () => { + var html = renderer.renderCell(3, undefined); + expect(html).to.be(''); + }); + }); +}); + + diff --git a/public/app/panels/table/specs/table_model_specs.ts b/public/app/panels/table/specs/table_model_specs.ts new file mode 100644 index 0000000000000..ad5158357300f --- /dev/null +++ b/public/app/panels/table/specs/table_model_specs.ts @@ -0,0 +1,51 @@ +import {describe, beforeEach, it, sinon, expect} from 'test/lib/common'; + +import {TableModel} from '../table_model'; + +describe('when sorting table desc', () => { + var table; + var panel = { + sort: {col: 0, desc: true}, + }; + + beforeEach(() => { + table = new TableModel(); + table.columns = [{}, {}]; + table.rows = [[100, 12], [105, 10], [103, 11]]; + table.sort(panel.sort); + }); + + it('should sort by time', () => { + expect(table.rows[0][0]).to.be(105); + expect(table.rows[1][0]).to.be(103); + expect(table.rows[2][0]).to.be(100); + }); + + it ('should mark column being sorted', () => { + expect(table.columns[0].sort).to.be(true); + expect(table.columns[0].desc).to.be(true); + }); + +}); + +describe('when sorting table asc', () => { + var table; + var panel = { + sort: {col: 1, desc: false}, + }; + + 
beforeEach(() => { + table = new TableModel(); + table.columns = [{}, {}]; + table.rows = [[100, 11], [105, 15], [103, 10]]; + table.sort(panel.sort); + }); + + it('should sort by time', () => { + expect(table.rows[0][1]).to.be(10); + expect(table.rows[1][1]).to.be(11); + expect(table.rows[2][1]).to.be(15); + }); + +}); + diff --git a/public/app/panels/table/specs/transformers_specs.ts b/public/app/panels/table/specs/transformers_specs.ts new file mode 100644 index 0000000000000..bb42b997d330c --- /dev/null +++ b/public/app/panels/table/specs/transformers_specs.ts @@ -0,0 +1,186 @@ +import {describe, beforeEach, it, sinon, expect} from 'test/lib/common'; + +import {TableModel} from '../table_model'; +import {transformers} from '../transformers'; + +describe('when transforming time series table', () => { + var table; + + describe('given 2 time series', () => { + var time = new Date().getTime(); + var timeSeries = [ + { + target: 'series1', + datapoints: [[12.12, time], [14.44, time+1]], + }, + { + target: 'series2', + datapoints: [[16.12, time]], + } + ]; + + describe('timeseries_to_rows', () => { + var panel = { + transform: 'timeseries_to_rows', + sort: {col: 0, desc: true}, + }; + + beforeEach(() => { + table = TableModel.transform(timeSeries, panel); + }); + + it('should return 3 rows', () => { + expect(table.rows.length).to.be(3); + expect(table.rows[0][1]).to.be('series1'); + expect(table.rows[1][1]).to.be('series1'); + expect(table.rows[2][1]).to.be('series2'); + expect(table.rows[0][2]).to.be(12.12); + }); + + it('should return 3 rows', () => { + expect(table.columns.length).to.be(3); + expect(table.columns[0].text).to.be('Time'); + expect(table.columns[1].text).to.be('Metric'); + expect(table.columns[2].text).to.be('Value'); + }); + }); + + describe('timeseries_to_columns', () => { + var panel = { + transform: 'timeseries_to_columns' + }; + + beforeEach(() => { + table = TableModel.transform(timeSeries, panel); + }); + + it ('should return 3 columns', () => { + expect(table.columns.length).to.be(3); + expect(table.columns[0].text).to.be('Time'); + expect(table.columns[1].text).to.be('series1'); + expect(table.columns[2].text).to.be('series2'); + }); + + it ('should return 2 rows', () => { + expect(table.rows.length).to.be(2); + expect(table.rows[0][1]).to.be(12.12); + expect(table.rows[0][2]).to.be(16.12); + }); + + it ('should be undefined when no value for timestamp', () => { + expect(table.rows[1][2]).to.be(undefined); + }); + }); + + describe('timeseries_aggregations', () => { + var panel = { + transform: 'timeseries_aggregations', + sort: {col: 0, desc: true}, + columns: [{text: 'Max', value: 'max'}, {text: 'Min', value: 'min'}] + }; + + beforeEach(() => { + table = TableModel.transform(timeSeries, panel); + }); + + it('should return 2 rows', () => { + expect(table.rows.length).to.be(2); + expect(table.rows[0][0]).to.be('series1'); + expect(table.rows[0][1]).to.be(14.44); + expect(table.rows[0][2]).to.be(12.12); + }); + + it('should return 2 columns', () => { + expect(table.columns.length).to.be(3); + expect(table.columns[0].text).to.be('Metric'); + expect(table.columns[1].text).to.be('Max'); + expect(table.columns[2].text).to.be('Min'); + }); + }); + + describe('JSON Data', () => { + var panel = { + transform: 'json', + columns: [ + {text: 'Timestamp', value: 'timestamp'}, + {text: 'Message', value: 'message'}, + {text: 'nested.level2', value: 'nested.level2'}, + ] + }; + var rawData = [ + { + type: 'docs', + datapoints: [ + { + timestamp: 'time', + message: 'message', + 
nested: { + level2: 'level2-value' + } + } + ] + } + ]; + + describe('getColumns', function() { + it('should return nested properties', function() { + var columns = transformers['json'].getColumns(rawData); + expect(columns[0].text).to.be('timestamp'); + expect(columns[1].text).to.be('message'); + expect(columns[2].text).to.be('nested.level2'); + }); + }); + + describe('transform', function() { + beforeEach(() => { + table = TableModel.transform(rawData, panel); + }); + + it ('should return 2 columns', () => { + expect(table.columns.length).to.be(3); + expect(table.columns[0].text).to.be('Timestamp'); + expect(table.columns[1].text).to.be('Message'); + expect(table.columns[2].text).to.be('nested.level2'); + }); + + it ('should return 2 rows', () => { + expect(table.rows.length).to.be(1); + expect(table.rows[0][0]).to.be('time'); + expect(table.rows[0][1]).to.be('message'); + expect(table.rows[0][2]).to.be('level2-value'); + }); + }); + }); + + describe('Annnotations', () => { + var panel = {transform: 'annotations'}; + var rawData = [ + { + min: 1000, + text: 'hej', + tags: ['tags', 'asd'], + title: 'title', + } + ]; + + beforeEach(() => { + table = TableModel.transform(rawData, panel); + }); + + it ('should return 4 columns', () => { + expect(table.columns.length).to.be(4); + expect(table.columns[0].text).to.be('Time'); + expect(table.columns[1].text).to.be('Title'); + expect(table.columns[2].text).to.be('Text'); + expect(table.columns[3].text).to.be('Tags'); + }); + + it ('should return 1 rows', () => { + expect(table.rows.length).to.be(1); + expect(table.rows[0][0]).to.be(1000); + }); + }); + + }); +}); + diff --git a/public/app/panels/table/table_model.ts b/public/app/panels/table/table_model.ts new file mode 100644 index 0000000000000..1fa4007e6e36f --- /dev/null +++ b/public/app/panels/table/table_model.ts @@ -0,0 +1,52 @@ +import {transformers} from './transformers'; + +export class TableModel { + columns: any[]; + rows: any[]; + + constructor() { + this.columns = []; + this.rows = []; + } + + sort(options) { + if (options.col === null || this.columns.length <= options.col) { + return; + } + + this.rows.sort(function(a, b) { + a = a[options.col]; + b = b[options.col]; + if (a < b) { + return -1; + } + if (a > b) { + return 1; + } + return 0; + }); + + this.columns[options.col].sort = true; + + if (options.desc) { + this.rows.reverse(); + this.columns[options.col].desc = true; + } + } + + static transform(data, panel) { + var model = new TableModel(); + + if (!data || data.length === 0) { + return model; + } + + var transformer = transformers[panel.transform]; + if (!transformer) { + throw {message: 'Transformer ' + panel.transformer + ' not found'}; + } + + transformer.transform(data, panel, model); + return model; + } +} diff --git a/public/app/panels/table/transformers.ts b/public/app/panels/table/transformers.ts new file mode 100644 index 0000000000000..a4d0d4395c5c1 --- /dev/null +++ b/public/app/panels/table/transformers.ts @@ -0,0 +1,200 @@ +/// + +import moment = require('moment'); +import _ = require('lodash'); +import flatten = require('app/core/utils/flatten'); +import TimeSeries = require('app/core/time_series'); + +var transformers = {}; + +transformers['timeseries_to_rows'] = { + description: 'Time series to rows', + getColumns: function() { + return []; + }, + transform: function(data, panel, model) { + model.columns = [ + {text: 'Time', type: 'date'}, + {text: 'Metric'}, + {text: 'Value'}, + ]; + + for (var i = 0; i < data.length; i++) { + var series = data[i]; + for 
(var y = 0; y < series.datapoints.length; y++) { + var dp = series.datapoints[y]; + model.rows.push([dp[1], series.target, dp[0]]); + } + } + }, +}; + +transformers['timeseries_to_columns'] = { + description: 'Time series to columns', + getColumns: function() { + return []; + }, + transform: function(data, panel, model) { + model.columns.push({text: 'Time', type: 'date'}); + + // group by time + var points = {}; + + for (var i = 0; i < data.length; i++) { + var series = data[i]; + model.columns.push({text: series.target}); + + for (var y = 0; y < series.datapoints.length; y++) { + var dp = series.datapoints[y]; + var timeKey = dp[1].toString(); + + if (!points[timeKey]) { + points[timeKey] = {time: dp[1]}; + points[timeKey][i] = dp[0]; + } + else { + points[timeKey][i] = dp[0]; + } + } + } + + for (var time in points) { + var point = points[time]; + var values = [point.time]; + + for (var i = 0; i < data.length; i++) { + var value = point[i]; + values.push(value); + } + + model.rows.push(values); + } + } +}; + +transformers['timeseries_aggregations'] = { + description: 'Time series aggregations', + getColumns: function() { + return [ + {text: 'Avg', value: 'avg'}, + {text: 'Min', value: 'min'}, + {text: 'Max', value: 'max'}, + {text: 'Total', value: 'total'}, + {text: 'Current', value: 'current'}, + {text: 'Count', value: 'count'}, + ]; + }, + transform: function(data, panel, model) { + var i, y; + model.columns.push({text: 'Metric'}); + + if (panel.columns.length === 0) { + panel.columns.push({text: 'Avg', value: 'avg'}); + } + + for (i = 0; i < panel.columns.length; i++) { + model.columns.push({text: panel.columns[i].text}); + } + + for (i = 0; i < data.length; i++) { + var series = new TimeSeries({ + datapoints: data[i].datapoints, + alias: data[i].target, + }); + + series.getFlotPairs('connected'); + var cells = [series.alias]; + + for (y = 0; y < panel.columns.length; y++) { + cells.push(series.stats[panel.columns[y].value]); + } + + model.rows.push(cells); + } + } +}; + +transformers['annotations'] = { + description: 'Annotations', + getColumns: function() { + return []; + }, + transform: function(data, panel, model) { + model.columns.push({text: 'Time', type: 'date'}); + model.columns.push({text: 'Title'}); + model.columns.push({text: 'Text'}); + model.columns.push({text: 'Tags'}); + + if (!data || data.length === 0) { + return; + } + + for (var i = 0; i < data.length; i++) { + var evt = data[i]; + model.rows.push([evt.min, evt.title, evt.text, evt.tags]); + } + } +}; + +transformers['json'] = { + description: 'JSON Data', + getColumns: function(data) { + if (!data || data.length === 0) { + return []; + } + + var names: any = {}; + for (var i = 0; i < data.length; i++) { + var series = data[i]; + if (series.type !== 'docs') { + continue; + } + + // only look at 100 docs + var maxDocs = Math.min(series.datapoints.length, 100); + for (var y = 0; y < maxDocs; y++) { + var doc = series.datapoints[y]; + var flattened = flatten(doc, null); + for (var propName in flattened) { + names[propName] = true; + } + } + } + + return _.map(names, function(value, key) { + return {text: key, value: key}; + }); + }, + transform: function(data, panel, model) { + var i, y, z; + for (i = 0; i < panel.columns.length; i++) { + model.columns.push({text: panel.columns[i].text}); + } + + if (model.columns.length === 0) { + model.columns.push({text: 'JSON'}); + } + + for (i = 0; i < data.length; i++) { + var series = data[i]; + + for (y = 0; y < series.datapoints.length; y++) { + var dp = series.datapoints[y]; 
+ var values = []; + + if (_.isObject(dp) && panel.columns.length > 0) { + var flattened = flatten(dp, null); + for (z = 0; z < panel.columns.length; z++) { + values.push(flattened[panel.columns[z].value]); + } + } else { + values.push(JSON.stringify(dp)); + } + + model.rows.push(values); + } + } + } +}; + +export {transformers} diff --git a/public/app/panels/text/lib/showdown.js b/public/app/panels/text/lib/showdown.js index 9493071ca680b..0286b0598f10a 100644 --- a/public/app/panels/text/lib/showdown.js +++ b/public/app/panels/text/lib/showdown.js @@ -855,7 +855,7 @@ var _DoLists = function(text) { // Turn double returns into triple returns, so that we can make a // paragraph for the last item in a list, if necessary: - list = list.replace(/\n{2,}/g,"\n\n\n");; + list = list.replace(/\n{2,}/g,"\n\n\n"); var result = _ProcessListItems(list); // Trim any trailing whitespace, to put the closing `` @@ -875,7 +875,7 @@ var _DoLists = function(text) { var list_type = (m3.search(/[*+-]/g)>-1) ? "ul" : "ol"; // Turn double returns into triple returns, so that we can make a // paragraph for the last item in a list, if necessary: - var list = list.replace(/\n{2,}/g,"\n\n\n");; + list = list.replace(/\n{2,}/g,"\n\n\n"); var result = _ProcessListItems(list); result = runup + "<"+list_type+">\n" + result + "\n"; return result; @@ -1451,4 +1451,4 @@ if (typeof define === 'function' && define.amd) { define(function() { return Showdown; }); -} \ No newline at end of file +} diff --git a/public/app/panels/text/module.js b/public/app/panels/text/module.js index 436a9982b4114..c301690d2eb8f 100644 --- a/public/app/panels/text/module.js +++ b/public/app/panels/text/module.js @@ -1,9 +1,9 @@ define([ 'angular', - 'app', + 'app/app', 'lodash', 'require', - 'components/panelmeta', + 'app/features/panel/panel_meta', ], function (angular, app, _, require, PanelMeta) { 'use strict'; @@ -93,7 +93,7 @@ function (angular, app, _, require, PanelMeta) { $scope.updateContent = function(html) { try { - $scope.content = $sce.trustAsHtml(templateSrv.replace(html)); + $scope.content = $sce.trustAsHtml(templateSrv.replace(html, $scope.panel.scopedVars)); } catch(e) { console.log('Text panel error: ', e); $scope.content = $sce.trustAsHtml(html); diff --git a/public/app/panels/timepicker/editor.html b/public/app/panels/timepicker/editor.html deleted file mode 100644 index 34712ad582d2e..0000000000000 --- a/public/app/panels/timepicker/editor.html +++ /dev/null @@ -1,49 +0,0 @@ -
                -
                -
                -
                -
                  -
                • - Relative time options -
                • -
                • - -
                • -
                • - Until -
                • -
                • - now- -
                • -
                • - -
                • -
                -
                -
                -
                -
                  -
                • - Auto-refresh options -
                • -
                • - -
                • -
                -
                -
                -
                - -

                -
                - - For these changes to fully take effect save and reload the dashboard. - -

                -
                diff --git a/public/app/panels/timepicker/module.html b/public/app/panels/timepicker/module.html deleted file mode 100644 index e73516126aee8..0000000000000 --- a/public/app/panels/timepicker/module.html +++ /dev/null @@ -1,57 +0,0 @@ -
                - -
                - -
                -
                diff --git a/public/app/panels/timepicker/module.js b/public/app/panels/timepicker/module.js deleted file mode 100644 index 928c3dfe5fc04..0000000000000 --- a/public/app/panels/timepicker/module.js +++ /dev/null @@ -1,242 +0,0 @@ -/* - - ## Timepicker2 - - ### Parameters - * mode :: The default mode of the panel. Options: 'relative', 'absolute' 'since' Default: 'relative' - * time_options :: An array of possible time options. Default: ['5m','15m','1h','6h','12h','24h','2d','7d','30d'] - * timespan :: The default options selected for the relative view. Default: '15m' - * timefield :: The field in which time is stored in the document. - * refresh: Object containing refresh parameters - * enable :: true/false, enable auto refresh by default. Default: false - * interval :: Seconds between auto refresh. Default: 30 - * min :: The lowest interval a user may set -*/ -define([ - 'angular', - 'app', - 'lodash', - 'moment', - 'kbn' -], -function (angular, app, _, moment, kbn) { - 'use strict'; - - var module = angular.module('grafana.panels.timepicker', []); - app.useModule(module); - - module.controller('timepicker', function($scope, $rootScope, timeSrv) { - - $scope.panelMeta = { - status : "Stable", - description : "" - }; - - // Set and populate defaults - var _d = { - status : "Stable", - time_options : ['5m','15m','1h','6h','12h','24h','2d','7d','30d'], - refresh_intervals : ['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'], - }; - - _.defaults($scope.panel,_d); - - // ng-pattern regexs - $scope.patterns = { - date: /^[0-9]{2}\/[0-9]{2}\/[0-9]{4}$/, - hour: /^([01]?[0-9]|2[0-3])$/, - minute: /^[0-5][0-9]$/, - second: /^[0-5][0-9]$/, - millisecond: /^[0-9]*$/ - }; - - $scope.timeSrv = timeSrv; - - $scope.$on('refresh', function() { - $scope.init(); - }); - - $scope.init = function() { - var time = timeSrv.timeRange(true); - $scope.panel.now = false; - - var unparsed = timeSrv.timeRange(false); - if (_.isString(unparsed.to) && unparsed.to.indexOf('now') === 0) { - $scope.panel.now = true; - } - - $scope.time = getScopeTimeObj(time.from, time.to); - - $scope.onAppEvent('zoom-out', function() { - $scope.zoom(2); - }); - }; - - $scope.customTime = function() { - // Assume the form is valid since we're setting it to something valid - $scope.input.$setValidity("dummy", true); - $scope.temptime = cloneTime($scope.time); - $scope.temptime.now = $scope.panel.now; - - $scope.temptime.from.date.setHours(0,0,0,0); - $scope.temptime.to.date.setHours(0,0,0,0); - - // Date picker needs the date to be at the start of the day - if(new Date().getTimezoneOffset() < 0) { - $scope.temptime.from.date = moment($scope.temptime.from.date).add(1, 'days').toDate(); - $scope.temptime.to.date = moment($scope.temptime.to.date).add(1, 'days').toDate(); - } - - $scope.appEvent('show-dash-editor', {src: 'app/panels/timepicker/custom.html', scope: $scope }); - }; - - // Constantly validate the input of the fields. This function does not change any date variables - // outside of its own scope - $scope.validate = function(time) { - // Assume the form is valid. There is a hidden dummy input for invalidating it programatically. 
- $scope.input.$setValidity("dummy", true); - - var _from = datepickerToLocal(time.from.date), - _to = datepickerToLocal(time.to.date), - _t = time; - - if($scope.input.$valid) { - - _from.setHours(_t.from.hour,_t.from.minute,_t.from.second,_t.from.millisecond); - _to.setHours(_t.to.hour,_t.to.minute,_t.to.second,_t.to.millisecond); - - // Check that the objects are valid and to is after from - if(isNaN(_from.getTime()) || isNaN(_to.getTime()) || _from.getTime() >= _to.getTime()) { - $scope.input.$setValidity("dummy", false); - return false; - } - } else { - return false; - } - - return { from: _from, to:_to, now: time.now}; - }; - - $scope.setNow = function() { - $scope.time.to = getTimeObj(new Date()); - }; - - /* - time : { - from: Date - to: Date - } - */ - $scope.setAbsoluteTimeFilter = function (time) { - // Create filter object - var _filter = _.clone(time); - - if(time.now) { - _filter.to = "now"; - } - - // Set the filter - $scope.panel.filter_id = timeSrv.setTime(_filter); - - // Update our representation - $scope.time = getScopeTimeObj(time.from,time.to); - }; - - $scope.setRelativeFilter = function(timespan) { - $scope.panel.now = true; - - var _filter = { - from : "now-"+timespan, - to: "now" - }; - - if ($scope.panel.nowDelay) { - _filter.to = 'now-' + $scope.panel.nowDelay; - } - - timeSrv.setTime(_filter); - - $scope.time = getScopeTimeObj(kbn.parseDate(_filter.from),new Date()); - }; - - var pad = function(n, width, z) { - z = z || '0'; - n = n.toString(); - return n.length >= width ? n : new Array(width - n.length + 1).join(z) + n; - }; - - var cloneTime = function(time) { - var _n = { - from: _.clone(time.from), - to: _.clone(time.to) - }; - // Create new dates as _.clone is shallow. - _n.from.date = new Date(_n.from.date); - _n.to.date = new Date(_n.to.date); - return _n; - }; - - var getScopeTimeObj = function(from,to) { - var model = { from: getTimeObj(from), to: getTimeObj(to), }; - - if (model.from.date) { - model.tooltip = $scope.dashboard.formatDate(model.from.date) + '
-        model.tooltip = $scope.dashboard.formatDate(model.from.date) + ' <br>to<br> ';
-        model.tooltip += $scope.dashboard.formatDate(model.to.date);
-      }
-      else {
-        model.tooltip = 'Click to set time filter';
-      }
-
-      if (timeSrv.time) {
-        if ($scope.panel.now) {
-          model.rangeString = moment(model.from.date).fromNow() + ' to ' +
-            moment(model.to.date).fromNow();
-        }
-        else {
-          model.rangeString = $scope.dashboard.formatDate(model.from.date, 'MMM D, YYYY HH:mm:ss') + ' to ' +
-            $scope.dashboard.formatDate(model.to.date, 'MMM D, YYYY HH:mm:ss');
-        }
-      }
-
-      return model;
-    };
-
-    var getTimeObj = function(date) {
-      return {
-        date: new Date(date),
-        hour: pad(date.getHours(),2),
-        minute: pad(date.getMinutes(),2),
-        second: pad(date.getSeconds(),2),
-        millisecond: pad(date.getMilliseconds(),3)
-      };
-    };
-
-    // Do not use the results of this function unless you plan to use setHour/Minutes/etc on the result
-    var datepickerToLocal = function(date) {
-      date = moment(date).clone().toDate();
-      return moment(new Date(date.getTime() + date.getTimezoneOffset() * 60000)).toDate();
-    };
-
-    $scope.zoom = function(factor) {
-      var range = timeSrv.timeRange();
-
-      var timespan = (range.to.valueOf() - range.from.valueOf());
-      var center = range.to.valueOf() - timespan/2;
-
-      var to = (center + (timespan*factor)/2);
-      var from = (center - (timespan*factor)/2);
-
-      if(to > Date.now() && range.to <= Date.now()) {
-        var offset = to - Date.now();
-        from = from - offset;
-        to = Date.now();
-      }
-
-      timeSrv.setTime({
-        from: moment.utc(from).toDate(),
-        to: moment.utc(to).toDate(),
-      });
-    };
-
-  });
-});
diff --git a/public/app/partials/bootstrap/tab.html b/public/app/partials/bootstrap/tab.html
new file mode 100644
index 0000000000000..d76dd67caf234
--- /dev/null
+++ b/public/app/partials/bootstrap/tab.html
@@ -0,0 +1,3 @@
[3 added lines of tab template markup; only the {{heading}} binding survived extraction]
diff --git a/public/app/partials/bootstrap/tabset.html b/public/app/partials/bootstrap/tabset.html
new file mode 100644
index 0000000000000..acbad38390bb8
--- /dev/null
+++ b/public/app/partials/bootstrap/tabset.html
@@ -0,0 +1,10 @@
[10 added lines of tabset template markup; content not recoverable from the extracted text]
diff --git a/public/app/partials/confirm_modal.html b/public/app/partials/confirm_modal.html
index 8730cfb1596c0..24802814be1a1 100644
--- a/public/app/partials/confirm_modal.html
+++ b/public/app/partials/confirm_modal.html
@@ -12,7 +12,7 @@
[one markup line near the {{title}} heading replaced; changed content not recoverable from the extracted text]
diff --git a/public/app/partials/dashboard.html b/public/app/partials/dashboard.html
index 57c9cb7452151..09c80fd3341fd 100644
--- a/public/app/partials/dashboard.html
+++ b/public/app/partials/dashboard.html
@@ -1,10 +1,11 @@
[markup changes; content not recoverable from the extracted text]
@@ -17,57 +18,59 @@
[markup changes; content not recoverable from the extracted text]
@@ -99,9 +99,9 @@
[markup changes around the "ADD ROW" control; content not recoverable from the extracted text]
diff --git a/public/app/partials/help_modal.html b/public/app/partials/help_modal.html
index 5b9452a94f34a..5d7abfe24907d 100644
--- a/public/app/partials/help_modal.html
+++ b/public/app/partials/help_modal.html
@@ -21,7 +21,7 @@
[shortcut for "Open dashboard search view (also contains import/playlist controls)" changed from CTRL+F to F; context row "Exit fullscreen edit/view mode, close search or any editor view" unchanged; surrounding markup not recoverable from the extracted text]
diff --git a/public/app/partials/inspector.html b/public/app/partials/inspector.html
index 14069f3196115..5a83afe9a3b75 100644
--- a/public/app/partials/inspector.html
+++ b/public/app/partials/inspector.html
@@ -10,6 +10,9 @@
[3 added markup lines; content not recoverable from the extracted text]
@@ -49,12 +52,10 @@
[markup changes near "Request parameters" and the {{message}} and {{response}} bindings; content not recoverable from the extracted text]
diff --git a/public/app/partials/login.html b/public/app/partials/login.html
index a5894866d5a2e..630f295ecd336 100644
--- a/public/app/partials/login.html
+++ b/public/app/partials/login.html
@@ -16,8 +16,8 @@
                -